This commit is contained in:
恍兮惚兮 2024-07-20 20:57:08 +08:00
parent 4a4503dcc6
commit 7afd0ea856
8 changed files with 109 additions and 64 deletions

View File

@@ -1,5 +1,5 @@
 from qtsymbols import *
-import os, functools
+import os, functools, re
 from myutils.config import globalconfig
 from myutils.utils import splittranslatortypes
 from gui.usefulwidget import (
@@ -37,16 +37,37 @@ def getall(l, item="fanyi", name=None):
     return grids


+def validator(self, text):
+    regExp = re.compile(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3}):(\d{1,5})$")
+    mch = regExp.match(text)
+    for _ in (1,):
+        if not mch:
+            break
+        _1, _2, _3, _4, _p = [int(_) for _ in mch.groups()]
+        if _p > 65535:
+            break
+        if any([_ > 255 for _ in [_1, _2, _3, _4]]):
+            break
+        globalconfig["proxy"] = text
+        self.createproxyedit_check.setText("")
+        return
+    self.createproxyedit_check.setText("Invalid")
+
+
 def createproxyedit(self):
     proxy = QLineEdit(globalconfig["proxy"])
     self.__proxyedit = proxy
-    proxy.textEdited.connect(
-        lambda: globalconfig.__setitem__("proxy", self.__proxyedit.text())
-    )
+    proxy.textChanged.connect(functools.partial(validator, self))
     _ifusesysproxy(self, globalconfig["usesysproxy"])
     return proxy


+def createproxyedit_check(self):
+    self.createproxyedit_check = QLabel()
+    return self.createproxyedit_check
+
+
 def _ifusesysproxy(self, x):
     self.__proxyedit.setEnabled(not x)
@@ -73,7 +94,11 @@ def makeproxytab(self, basel):
                 )
             ),
         ],
-        [("手动设置代理(ip:port)", 5), (functools.partial(createproxyedit, self), 5)],
+        [
+            ("手动设置代理(ip:port)", 5),
+            (functools.partial(createproxyedit, self), 5),
+            (functools.partial(createproxyedit_check, self), 5),
+        ],
         [],
         [("使用代理的项目", -1)],
     ]
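For reference, a minimal standalone sketch of the ip:port rule the new `validator` enforces (the `is_valid_proxy` name is illustrative, not from the codebase):

```python
import re

# Same pattern as the commit: four dotted decimal octets, a colon, and a port.
_PROXY_RE = re.compile(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3}):(\d{1,5})$")

def is_valid_proxy(text: str) -> bool:
    """Return True when text looks like a usable ip:port proxy address."""
    m = _PROXY_RE.match(text)
    if not m:
        return False
    *octets, port = (int(g) for g in m.groups())
    # Octets are capped at 255 and the port at 65535, mirroring the checks above.
    return all(o <= 255 for o in octets) and port <= 65535

assert is_valid_proxy("127.0.0.1:7890")
assert not is_valid_proxy("300.0.0.1:7890")   # octet out of range
assert not is_valid_proxy("127.0.0.1:70000")  # port out of range
```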

View File

@@ -60,11 +60,15 @@ def splitapillm(l):
     return is_gpt_likes, not_is_gpt_like


-def loadvisinternal(skipid=False, skipidid=None):
+def loadvisinternal(btnplus):
     __vis = []
     __uid = []
     lixians, pre, mianfei, develop, shoufei = splittranslatortypes()
-    is_gpt_likes, not_is_gpt_like = splitapillm(shoufei)
+    if btnplus == "api":
+        is_gpt_likes, not_is_gpt_like = splitapillm(shoufei)
+    elif btnplus == "offline":
+        is_gpt_likes, not_is_gpt_like = splitapillm(lixians)
     for _ in is_gpt_likes:
         _f = "./Lunatranslator/translator/{}.py".format(_)
         if not os.path.exists(_f):
@@ -74,9 +78,9 @@ def loadvisinternal(skipid=False, skipidid=None):
     return __vis, __uid


-def getalistname(parent, callback, skipid=False, skipidid=None):
+def getalistname(parent, btnplus, callback):
     __d = {"k": 0, "n": ""}
-    __vis, __uid = loadvisinternal(skipid, skipidid)
+    __vis, __uid = loadvisinternal(btnplus)

     def __wrap(callback, __d, __uid):
         if len(__uid) == 0:
@@ -111,7 +115,7 @@ def getalistname(parent, callback, skipid=False, skipidid=None):
     )


-def selectllmcallback(self, countnum, btn, fanyi, name):
+def selectllmcallback(self, countnum, btn, btnplus, fanyi, name):
     uid = str(uuid.uuid4())
     _f1 = "./Lunatranslator/translator/{}.py".format(fanyi)
     _f2 = "./Lunatranslator/translator/{}.py".format(uid)
@@ -122,10 +126,11 @@ def selectllmcallback(self, countnum, btn, fanyi, name):
     if not name:
         name = globalconfig["fanyi"][fanyi]["name"] + "_copy"
     globalconfig["fanyi"][uid]["name"] = name
+    globalconfig["fanyi"][uid]["type"] = btnplus
     if fanyi in translatorsetting:
         translatorsetting[uid] = deepcopydict(translatorsetting[fanyi])
-    layout: QGridLayout = self.damoxinggridinternal
+    layout: QGridLayout = getattr(self, "damoxinggridinternal" + btnplus)
     items = autoinitdialog_items(translatorsetting[uid])
     last = getIconButton(
@@ -175,23 +180,32 @@ def selectllmcallback(self, countnum, btn, fanyi, name):
     countnum.append(0)


-def btnpluscallback(self, countnum, btn):
-    getalistname(self, functools.partial(selectllmcallback, self, countnum, btn))
+def btnpluscallback(self, countnum, btn, btnplus):
+    getalistname(
+        self,
+        btnplus,
+        functools.partial(selectllmcallback, self, countnum, btn, btnplus),
+    )


-def createbtn(self, countnum):
+def createbtn(self, countnum, btnplus):
     btn = QPushButton(self)
     btn.setIcon(qtawesome.icon("fa.plus"))
-    btn.clicked.connect(functools.partial(btnpluscallback, self, countnum, btn))
+    btn.clicked.connect(
+        functools.partial(btnpluscallback, self, countnum, btn, btnplus)
+    )
     return btn


-def createbtnquest(self):
+def createbtnquest(self, btnplus):
     btn = QPushButton(self)
     btn.setIcon(qtawesome.icon("fa.question"))
-    btn.clicked.connect(
-        lambda: os.startfile(dynamiclink("{docs_server}/#/zh/guochandamoxing"))
-    )
+    if btnplus == "offline":
+        btn.clicked.connect(
+            lambda: os.startfile(dynamiclink("{docs_server}/#/zh/offlinellm"))
+        )
+    elif btnplus == "api":
+        btn.clicked.connect(
+            lambda: os.startfile(dynamiclink("{docs_server}/#/zh/guochandamoxing"))
+        )
     self.btnquestion = btn
     return btn
@@ -266,18 +280,18 @@ def initsome11(self, l, label=None, btnplus=False):
     grids.append(
         [
             ("", 10),
-            (functools.partial(createbtn, self, countnum), 2),
-            (functools.partial(createbtnquest, self), 2),
+            (functools.partial(createbtn, self, countnum, btnplus), 2),
+            (functools.partial(createbtnquest, self, btnplus), 2),
         ]
     )
     return grids


-def initsome2(self, l, label=None):
+def initsome2(self, l, label=None, btnplus=None):
     is_gpt_likes, not_is_gpt_like = splitapillm(l)
     not_is_gpt_like = initsome11(self, not_is_gpt_like, label)
-    is_gpt_likes = initsome11(self, is_gpt_likes, label, btnplus=True)
+    is_gpt_likes = initsome11(self, is_gpt_likes, label, btnplus=btnplus)
     grids = [
         [
             (
@@ -292,7 +306,7 @@ def initsome2(self, l, label=None):
                 type="grid",
                 title="大模型",
                 grid=is_gpt_likes,
-                internallayoutname="damoxinggridinternal",
+                internallayoutname="damoxinggridinternal" + btnplus,
                 parent=self,
             ),
             0,
@@ -469,10 +483,10 @@ def setTabTwo_lazy(self, basel):
     ]
     lixians, pre, mianfei, develop, shoufei = splittranslatortypes()
-    offlinegrid = initsome11(self, lixians)
+    offlinegrid = initsome2(self, lixians, btnplus="offline")
     onlinegrid = initsome11(self, mianfei)
     developgrid += initsome11(self, develop)
-    online_reg_grid += initsome2(self, shoufei)
+    online_reg_grid += initsome2(self, shoufei, btnplus="api")
     pretransgrid += initsome11(self, pre)
     vw, vl = getvboxwidget()
     basel.addWidget(vw)
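The thread running through this file is that a `btnplus` tag ("api" or "offline") gets baked into every callback with `functools.partial`, and per-group widgets are resolved by name via `getattr`. A reduced sketch of that pattern, with a hypothetical class and attribute names standing in for the real settings page:

```python
import functools

class SettingsPage:
    """Toy stand-in showing how a btnplus tag selects per-group state."""

    def __init__(self):
        # One grid per group, later resolved as getattr(self, name), which
        # mirrors internallayoutname="damoxinggridinternal" + btnplus.
        self.damoxinggridinternalapi = []
        self.damoxinggridinternaloffline = []

    def add_translator(self, btnplus, uid):
        grid = getattr(self, "damoxinggridinternal" + btnplus)
        grid.append(uid)
        # Tag the new entry so it sorts back into the right group on reload,
        # as globalconfig["fanyi"][uid]["type"] = btnplus does in the commit.
        return {"uid": uid, "type": btnplus}

page = SettingsPage()
# functools.partial bakes the tag into a callback, as the plus buttons do.
add_api = functools.partial(page.add_translator, "api")
print(add_api("uuid-1234"), page.damoxinggridinternalapi)
```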

View File

@@ -1563,6 +1563,7 @@
             "use": false,
             "color": "blue",
             "name": "DeepLX",
+            "type": "offline",
             "useproxy": false
         },
         "dev_chatgpt": {
@@ -1737,14 +1738,14 @@
             "use": false,
             "color": "blue",
             "name": "腾讯混元大模型",
-            "is_gpt_like":true
+            "is_gpt_like": true
         },
         "baiduqianfan": {
             "type": "api",
             "use": false,
             "color": "blue",
             "name": "百度千帆大模型",
-            "is_gpt_like":true
+            "is_gpt_like": true
         },
         "yandexapi": {
             "type": "api",

View File

@ -818,8 +818,6 @@
}, },
"TGW": { "TGW": {
"args": { "args": {
"TGW懒人包": "https://www.bilibili.com/video/BV1Te411U7me",
"TGW懒人包1": "https://pan.baidu.com/s/1fe7iiHIAtoXW80Twsrv8Nw?pwd=pato",
"Github仓库": "https://github.com/oobabooga/text-generation-webui", "Github仓库": "https://github.com/oobabooga/text-generation-webui",
"API接口地址(默认为http://127.0.0.1:5000/)": "http://127.0.0.1:5000/", "API接口地址(默认为http://127.0.0.1:5000/)": "http://127.0.0.1:5000/",
"API超时(秒)": 30, "API超时(秒)": 30,
@ -844,14 +842,6 @@
"frequency_penalty": 0 "frequency_penalty": 0
}, },
"argstype": { "argstype": {
"TGW懒人包": {
"type": "label",
"islink": true
},
"TGW懒人包1": {
"type": "label",
"islink": true
},
"Github仓库": { "Github仓库": {
"type": "label", "type": "label",
"islink": true "islink": true

docs/zh/offlinellm.md Normal file
View File

@@ -0,0 +1,39 @@
## How to use offline LLM translation?

### SakuraLLM

> This is the most recommended option: it is the simplest to configure, gives the best results, and lightweight models can even run on CPU alone.

For concrete deployment steps, see https://github.com/SakuraLLM/SakuraLLM/wiki

### TGW

You can deploy it by following [text-generation-webui](https://github.com/oobabooga/text-generation-webui), or use the [one-click bundle](https://pan.baidu.com/s/1fe7iiHIAtoXW80Twsrv8Nw?pwd=pato) together with this [unofficial tutorial](https://www.bilibili.com/video/BV1Te411U7me).

!> If you run into problems following the Bilibili uploader's video, take it up with the uploader, not me.

### ChatGPT-compatible API

The **SakuraLLM** and **TGW** endpoints are in fact essentially the same as the **ChatGPT-compatible API**, just with a few preset prompts and parameters on top, so you can enter a Sakura or TGW address and model into this interface's parameters.

You can also deploy a model with tools such as **oneapi** or **ollama**, then fill in its address and model name.

You can even deploy the model in the cloud on platforms such as Kaggle, in which case you may need the SECRET_KEY; otherwise the SECRET_KEY parameter can be ignored.

#### Example of deploying with ollama [@asukaminato0721](https://github.com/HIllya51/LunaTranslator/issues/797)

The model needs to run locally, or ~~be ssh port-forwarded to localhost (if you understand that phrase, you don't need to read on)~~.

This is only one approach; anything compatible with the gpt api will work.

Download ollama: https://www.ollama.com/

Take llama3 as an example:

https://www.ollama.com/library/llama3

Once the model is downloaded and running in the background, open the settings shown here:

![img](https://image.lunatranslator.xyz/zh/336483101-915f17c5-27a4-465f-9b4e-7a547ba5029f.png)

Change the model to the one you are running and the port to match, and you are done.
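To check such a deployment before pointing LunaTranslator at it, you can call the OpenAI-compatible endpoint directly. A minimal sketch, assuming ollama's default port 11434 and a pulled llama3 model (both are assumptions about your setup):

```python
import json
import urllib.request

# Assumed defaults: ollama exposes an OpenAI-compatible API on port 11434.
URL = "http://127.0.0.1:11434/v1/chat/completions"
payload = {
    "model": "llama3",  # must match the model you pulled and are running
    "messages": [
        {"role": "system", "content": "Translate the Japanese input into Chinese."},
        {"role": "user", "content": "こんにちは"},
    ],
}
req = urllib.request.Request(
    URL,
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    reply = json.load(resp)
print(reply["choices"][0]["message"]["content"])
```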

View File

@@ -1,24 +0,0 @@
## How to use offline chatgpt

The model needs to run locally, or ~~be ssh port-forwarded to localhost (if you understand that phrase, you don't need to read on)~~.

This is only one approach; anything compatible with the gpt api will work.

Download ollama: https://www.ollama.com/

Take llama3 as an example:

https://www.ollama.com/library/llama3

Once the model is downloaded and running in the background, open the settings shown here:

![img](https://image.lunatranslator.xyz/zh/336483101-915f17c5-27a4-465f-9b4e-7a547ba5029f.png)

Change the model to the one you are running and the port to match, and you are done.

>PS: https://github.com/SakuraLLM/SakuraLLM/wiki is recommended.
Deploying with llama.cpp is the simplest; then use the dedicated SakuraLLM interface, which ships some prompts written by the model authors.

<hr>

Reference: [asukaminato0721](https://github.com/HIllya51/LunaTranslator/issues/797)

View File

@@ -3,6 +3,7 @@
 - [Getting and launching the software](/zh/start.md)
 - [Software updates](/zh/update.md)
 - [FAQ](/zh/qas.md)
+- [How to use offline LLM translation](/zh/qa3.md)
 - [How to use embedded translation](/zh/embedtranslate.md)
 - [How to improve the OCR mode experience](/zh/gooduseocr.md)
 - [How to use Chinese LLM API interfaces](/zh/guochandamoxing.md)
@@ -11,6 +12,5 @@
 - [How to install extra language support for Windows OCR](/zh/windowsocr.md)
 - [How to use Mecab segmentation & part-of-speech coloring](/zh/qa1.md)
 - [How to send selected words to anki automatically](/zh/qa2.md)
-- [How to use offline chatgpt](/zh/qa3.md)
 - [How to use the free gemini api](/zh/qa4.md)
 - [Support the author](/zh/support.md)

View File

@@ -29,7 +29,7 @@ include(generate_product_version)
 set(VERSION_MAJOR 5)
 set(VERSION_MINOR 15)
-set(VERSION_PATCH 0)
+set(VERSION_PATCH 1)
 add_library(pch pch.cpp)
 target_precompile_headers(pch PUBLIC pch.h)