diff --git a/LunaTranslator/LunaTranslator/gui/setting_proxy.py b/LunaTranslator/LunaTranslator/gui/setting_proxy.py index 5ec9ea4a..4fbdbdce 100644 --- a/LunaTranslator/LunaTranslator/gui/setting_proxy.py +++ b/LunaTranslator/LunaTranslator/gui/setting_proxy.py @@ -1,5 +1,5 @@ from qtsymbols import * -import os, functools +import os, functools, re from myutils.config import globalconfig from myutils.utils import splittranslatortypes from gui.usefulwidget import ( @@ -37,16 +37,37 @@ def getall(l, item="fanyi", name=None): return grids +def validator(self, text): + regExp = re.compile(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3}):(\d{1,5})$") + mch = regExp.match(text) + for _ in (1,): + + if not mch: + break + _1, _2, _3, _4, _p = [int(_) for _ in mch.groups()] + if _p > 65535: + break + if any([_ > 255 for _ in [_1, _2, _3, _4]]): + break + globalconfig["proxy"] = text + self.createproxyedit_check.setText("") + return + self.createproxyedit_check.setText("Invalid") + + def createproxyedit(self): proxy = QLineEdit(globalconfig["proxy"]) self.__proxyedit = proxy - proxy.textEdited.connect( - lambda: globalconfig.__setitem__("proxy", self.__proxyedit.text()) - ) + proxy.textChanged.connect(functools.partial(validator, self)) _ifusesysproxy(self, globalconfig["usesysproxy"]) return proxy +def createproxyedit_check(self): + self.createproxyedit_check = QLabel() + return self.createproxyedit_check + + def _ifusesysproxy(self, x): self.__proxyedit.setEnabled(not x) @@ -73,7 +94,11 @@ def makeproxytab(self, basel): ) ), ], - [("手动设置代理(ip:port)", 5), (functools.partial(createproxyedit, self), 5)], + [ + ("手动设置代理(ip:port)", 5), + (functools.partial(createproxyedit, self), 5), + (functools.partial(createproxyedit_check, self), 5), + ], [], [("使用代理的项目", -1)], ] diff --git a/LunaTranslator/LunaTranslator/gui/setting_translate.py b/LunaTranslator/LunaTranslator/gui/setting_translate.py index a7dddb5e..191b036f 100644 --- a/LunaTranslator/LunaTranslator/gui/setting_translate.py 
+++ b/LunaTranslator/LunaTranslator/gui/setting_translate.py @@ -60,11 +60,15 @@ def splitapillm(l): return is_gpt_likes, not_is_gpt_like -def loadvisinternal(skipid=False, skipidid=None): +def loadvisinternal(btnplus): __vis = [] __uid = [] lixians, pre, mianfei, develop, shoufei = splittranslatortypes() - is_gpt_likes, not_is_gpt_like = splitapillm(shoufei) + if btnplus == "api": + is_gpt_likes, not_is_gpt_like = splitapillm(shoufei) + elif btnplus == "offline": + is_gpt_likes, not_is_gpt_like = splitapillm(lixians) + for _ in is_gpt_likes: _f = "./Lunatranslator/translator/{}.py".format(_) if not os.path.exists(_f): @@ -74,9 +78,9 @@ def loadvisinternal(skipid=False, skipidid=None): return __vis, __uid -def getalistname(parent, callback, skipid=False, skipidid=None): +def getalistname(parent, btnplus, callback): __d = {"k": 0, "n": ""} - __vis, __uid = loadvisinternal(skipid, skipidid) + __vis, __uid = loadvisinternal(btnplus) def __wrap(callback, __d, __uid): if len(__uid) == 0: @@ -111,7 +115,7 @@ def getalistname(parent, callback, skipid=False, skipidid=None): ) -def selectllmcallback(self, countnum, btn, fanyi, name): +def selectllmcallback(self, countnum, btn, btnplus, fanyi, name): uid = str(uuid.uuid4()) _f1 = "./Lunatranslator/translator/{}.py".format(fanyi) _f2 = "./Lunatranslator/translator/{}.py".format(uid) @@ -122,10 +126,11 @@ def selectllmcallback(self, countnum, btn, fanyi, name): if not name: name = globalconfig["fanyi"][fanyi]["name"] + "_copy" globalconfig["fanyi"][uid]["name"] = name + globalconfig["fanyi"][uid]["type"] = btnplus if fanyi in translatorsetting: translatorsetting[uid] = deepcopydict(translatorsetting[fanyi]) - layout: QGridLayout = self.damoxinggridinternal + layout: QGridLayout = getattr(self, "damoxinggridinternal" + btnplus) items = autoinitdialog_items(translatorsetting[uid]) last = getIconButton( @@ -175,23 +180,32 @@ def selectllmcallback(self, countnum, btn, fanyi, name): countnum.append(0) -def btnpluscallback(self, 
countnum, btn): - getalistname(self, functools.partial(selectllmcallback, self, countnum, btn)) +def btnpluscallback(self, countnum, btn, btnplus): + getalistname( + self, + btnplus, + functools.partial(selectllmcallback, self, countnum, btn, btnplus), + ) -def createbtn(self, countnum): +def createbtn(self, countnum, btnplus): btn = QPushButton(self) btn.setIcon(qtawesome.icon("fa.plus")) - btn.clicked.connect(functools.partial(btnpluscallback, self, countnum, btn)) + btn.clicked.connect( + functools.partial(btnpluscallback, self, countnum, btn, btnplus) + ) return btn -def createbtnquest(self): +def createbtnquest(self, btnplus): btn = QPushButton(self) btn.setIcon(qtawesome.icon("fa.question")) - btn.clicked.connect( - lambda: os.startfile(dynamiclink("{docs_server}/#/zh/guochandamoxing")) - ) + if btnplus == "offline": + btn.clicked.connect(lambda: os.startfile(dynamiclink("{docs_server}/#/zh/offlinellm"))) + elif btnplus == "api": + btn.clicked.connect( + lambda: os.startfile(dynamiclink("{docs_server}/#/zh/guochandamoxing")) + ) self.btnquestion = btn return btn @@ -266,18 +280,18 @@ def initsome11(self, l, label=None, btnplus=False): grids.append( [ ("", 10), - (functools.partial(createbtn, self, countnum), 2), - (functools.partial(createbtnquest, self), 2), + (functools.partial(createbtn, self, countnum, btnplus), 2), + (functools.partial(createbtnquest, self, btnplus), 2), ] ) return grids -def initsome2(self, l, label=None): +def initsome2(self, l, label=None, btnplus=None): is_gpt_likes, not_is_gpt_like = splitapillm(l) not_is_gpt_like = initsome11(self, not_is_gpt_like, label) - is_gpt_likes = initsome11(self, is_gpt_likes, label, btnplus=True) + is_gpt_likes = initsome11(self, is_gpt_likes, label, btnplus=btnplus) grids = [ [ ( @@ -292,7 +306,7 @@ def initsome2(self, l, label=None): type="grid", title="大模型", grid=is_gpt_likes, - internallayoutname="damoxinggridinternal", + internallayoutname="damoxinggridinternal" + btnplus, parent=self, ), 0, @@ 
-469,10 +483,10 @@ def setTabTwo_lazy(self, basel): ] lixians, pre, mianfei, develop, shoufei = splittranslatortypes() - offlinegrid = initsome11(self, lixians) + offlinegrid = initsome2(self, lixians, btnplus="offline") onlinegrid = initsome11(self, mianfei) developgrid += initsome11(self, develop) - online_reg_grid += initsome2(self, shoufei) + online_reg_grid += initsome2(self, shoufei, btnplus="api") pretransgrid += initsome11(self, pre) vw, vl = getvboxwidget() basel.addWidget(vw) diff --git a/LunaTranslator/files/defaultconfig/config.json b/LunaTranslator/files/defaultconfig/config.json index 4fb5ce0c..6a6ceb84 100644 --- a/LunaTranslator/files/defaultconfig/config.json +++ b/LunaTranslator/files/defaultconfig/config.json @@ -1563,6 +1563,7 @@ "use": false, "color": "blue", "name": "DeepLX", + "type": "offline", "useproxy": false }, "dev_chatgpt": { @@ -1737,14 +1738,14 @@ "use": false, "color": "blue", "name": "腾讯混元大模型", - "is_gpt_like":true + "is_gpt_like": true }, "baiduqianfan": { "type": "api", "use": false, "color": "blue", "name": "百度千帆大模型", - "is_gpt_like":true + "is_gpt_like": true }, "yandexapi": { "type": "api", diff --git a/LunaTranslator/files/defaultconfig/translatorsetting.json b/LunaTranslator/files/defaultconfig/translatorsetting.json index ae18e51e..9a7e95a8 100644 --- a/LunaTranslator/files/defaultconfig/translatorsetting.json +++ b/LunaTranslator/files/defaultconfig/translatorsetting.json @@ -818,8 +818,6 @@ }, "TGW": { "args": { - "TGW懒人包": "https://www.bilibili.com/video/BV1Te411U7me", - "TGW懒人包1": "https://pan.baidu.com/s/1fe7iiHIAtoXW80Twsrv8Nw?pwd=pato", "Github仓库": "https://github.com/oobabooga/text-generation-webui", "API接口地址(默认为http://127.0.0.1:5000/)": "http://127.0.0.1:5000/", "API超时(秒)": 30, @@ -844,14 +842,6 @@ "frequency_penalty": 0 }, "argstype": { - "TGW懒人包": { - "type": "label", - "islink": true - }, - "TGW懒人包1": { - "type": "label", - "islink": true - }, "Github仓库": { "type": "label", "islink": true diff --git 
a/docs/zh/offlinellm.md b/docs/zh/offlinellm.md new file mode 100644 index 00000000..1bcb85ba --- /dev/null +++ b/docs/zh/offlinellm.md @@ -0,0 +1,39 @@ +## 如何使用大模型离线翻译? + +### Sakura大模型 + +> 这是最推荐使用的,配置最简单,效果最好,也可以纯cpu运行轻量模型 + +具体部署方法可参考 https://github.com/SakuraLLM/SakuraLLM/wiki + +### TGW + +可参考 [text-generation-webui](https://github.com/oobabooga/text-generation-webui)进行部署,或使用[懒人包](https://pan.baidu.com/s/1fe7iiHIAtoXW80Twsrv8Nw?pwd=pato)+[非官方教程](https://www.bilibili.com/video/BV1Te411U7me) + +!> 看B站UP主的视频弄出了问题别来找我,找UP主去。 + +### ChatGPT兼容接口 + +其实**Sakura大模型**和**TGW**的接口基本和**ChatGPT兼容接口**一样,只不过多了一点预设的prompt和参数而已。可以把sakura和TGW的地址和模型填到这个的参数里面使用。 + +也可以使用**oneapi**、**ollama**之类的工具进行模型的部署,然后将地址和模型填入。 + +也可以使用Kaggle之类的平台来把模型部署到云端,这时可能会需要用到SECRET_KEY,其他时候可以无视SECRET_KEY参数。 + +#### 使用ollama部署的例子 [@asukaminato0721](https://github.com/HIllya51/LunaTranslator/issues/797) + +需要在本地或者 ~~ssh 转发端口到本地(能看懂这句话那不需要继续了)~~ + +仅提供一种解法。别的兼容 gpt api 的都可以。 + +下载 ollama https://www.ollama.com/ + +以 llama3 举例。 + +https://www.ollama.com/library/llama3 + +下载好模型,后台跑起来后,在 + +![img](https://image.lunatranslator.xyz/zh/336483101-915f17c5-27a4-465f-9b4e-7a547ba5029f.png) + +改成自己在跑的模型,端口改成对应的。就行了。 \ No newline at end of file diff --git a/docs/zh/qa3.md b/docs/zh/qa3.md deleted file mode 100644 index e4215fff..00000000 --- a/docs/zh/qa3.md +++ /dev/null @@ -1,24 +0,0 @@ -## 如何使用离线 chatgpt - -需要在本地或者 ~~ssh 转发端口到本地(能看懂这句话那不需要继续了)~~ - -仅提供一种解法。别的兼容 gpt api 的都可以。 - -下载 ollama https://www.ollama.com/ - -以 llama3 举例。 - -https://www.ollama.com/library/llama3 - -下载好模型,后台跑起来后,在 - -![img](https://image.lunatranslator.xyz/zh/336483101-915f17c5-27a4-465f-9b4e-7a547ba5029f.png) - -改成自己在跑的模型,端口改成对应的。就行了。 - ->PS: 建议使用 https://github.com/SakuraLLM/SakuraLLM/wiki -使用llama.cpp部署,最简单。然后使用sakura大模型的专用接口,里面有一些模型作者写的prompt。 - -