Mirror of https://github.com/HIllya51/LunaTranslator.git (synced 2024-12-28 08:04:13 +08:00)

Commit 9782360215 ("fix"), parent 8760a9fae6
@@ -1,17 +1,23 @@
 from qtsymbols import *
 import functools, os, time, hashlib
-import requests, gobject
+import requests, gobject, qtawesome, uuid, shutil
 from myutils.wrapper import threader
 from myutils.config import globalconfig, translatorsetting
 from myutils.subproc import subproc_w
 from myutils.config import globalconfig
-from myutils.utils import selectdebugfile, splittranslatortypes, checkportavailable
+from myutils.utils import (
+    selectdebugfile,
+    splittranslatortypes,
+    checkportavailable,
+    dynamiclink,
+)
 from gui.pretransfile import sqlite2json
 from gui.inputdialog import autoinitdialog, autoinitdialog_items
 from gui.usefulwidget import (
     D_getspinbox,
     getspinbox,
     getIconButton,
     D_getcolorbutton,
     getcolorbutton,
     getsimpleswitch,
     D_getIconButton,
     D_getsimpleswitch,
     selectcolor,
@@ -27,19 +33,183 @@ def hashtext(a):
     return hashlib.md5(a.encode("utf8")).hexdigest()


-def initsome11(self, l, label=None):
+def deepcopydict(d):
+    if isinstance(d, dict):
+        nd = {}
+        for k in d:
+            nd[k] = deepcopydict(d[k])
+        return nd
+    elif isinstance(d, list):
+        nd = []
+        for k in d:
+            nd.append(deepcopydict(k))
+        return nd
+    else:
+        return d
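`deepcopydict` is, in effect, `copy.deepcopy` restricted to JSON-like trees (dicts, lists, scalars). A minimal sketch of the behaviour the clone feature below relies on, with hypothetical config values:

```python
# Hypothetical config fragment; not part of the commit.
src = {"name": "ChatGPT兼容接口", "args": {"keys": ["a", "b"]}}
dup = deepcopydict(src)
dup["args"]["keys"].append("c")
assert src["args"]["keys"] == ["a", "b"]  # nested containers are copied, not aliased
```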
+
+
+def splitapillm(l):
+    not_is_gpt_like = []
+    is_gpt_likes = []
+    for fanyi in l:
+        is_gpt_like = globalconfig["fanyi"][fanyi].get("is_gpt_like", False)
+        if is_gpt_like:
+            is_gpt_likes.append(fanyi)
+        else:
+            not_is_gpt_like.append(fanyi)
+    return is_gpt_likes, not_is_gpt_like
+
+
+def loadvisinternal(skipid=False, skipidid=None):
+    __vis = []
+    __uid = []
+    lixians, pre, mianfei, develop, shoufei = splittranslatortypes()
+    is_gpt_likes, not_is_gpt_like = splitapillm(shoufei)
+    for _ in is_gpt_likes:
+        _f = "./Lunatranslator/translator/{}.py".format(_)
+        if not os.path.exists(_f):
+            continue
+        __vis.append(globalconfig["fanyi"][_]["name"])
+        __uid.append(_)
+    return __vis, __uid
+
+
+def getalistname(parent, callback, skipid=False, skipidid=None):
+    __d = {"k": 0, "n": ""}
+    __vis, __uid = loadvisinternal(skipid, skipidid)
+
+    def __wrap(callback, __d, __uid):
+        if len(__uid) == 0:
+            return
+
+        uid = __uid[__d["k"]]
+        callback(uid, __d["n"])
+
+    autoinitdialog(
+        parent,
+        "复制",  # "Copy" (dialog title)
+        600,
+        [
+            {
+                "type": "combo",
+                "name": "目标",  # "Target"
+                "d": __d,
+                "k": "k",
+                "list": __vis,
+            },
+            {
+                "name": "名称",  # "Name"
+                "type": "lineedit",
+                "d": __d,
+                "k": "n",
+            },
+            {
+                "type": "okcancel",
+                "callback": functools.partial(__wrap, callback, __d, __uid),
+            },
+        ],
+    )
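The dialog spec above follows autoinitdialog's d/k binding convention: each widget writes its current value into dict `d` under key `k`, and the okcancel callback reads the dict afterwards. Roughly, with hypothetical values:

```python
# Hypothetical run-through of the binding, not a real dialog.
__vis, __uid = ["ChatGPT兼容接口"], ["chatgpt"]  # what loadvisinternal() might return
__d = {"k": 0, "n": "my copy"}  # the combo wrote index 0 into "k", the line edit into "n"
uid = __uid[__d["k"]]           # what __wrap() resolves when OK is pressed
print(uid, __d["n"])            # -> chatgpt my copy
```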
+
+
+def selectllmcallback(self, countnum, btn, fanyi, name):
+    # Clone the translator: copy its module file under a fresh uuid and
+    # duplicate its config entries.
+    uid = str(uuid.uuid4())
+    _f1 = "./Lunatranslator/translator/{}.py".format(fanyi)
+    _f2 = "./Lunatranslator/translator/{}.py".format(uid)
+    shutil.copy(_f1, _f2)
+    globalconfig["fanyi"][uid] = deepcopydict(globalconfig["fanyi"][fanyi])
+    globalconfig["fanyi"][uid]["use"] = False
+
+    if not name:
+        name = globalconfig["fanyi"][fanyi]["name"] + "_copy"
+    globalconfig["fanyi"][uid]["name"] = name
+    if fanyi in translatorsetting:
+        translatorsetting[uid] = deepcopydict(translatorsetting[fanyi])
+
+    layout: QGridLayout = self.damoxinggridinternal
+
+    items = autoinitdialog_items(translatorsetting[uid])
+    last = getIconButton(
+        callback=functools.partial(
+            autoinitdialog,
+            self,
+            (globalconfig["fanyi"][uid]["name"]),
+            800,
+            items,
+        ),
+        icon="fa.gear",
+    )
+
+    name = LLabel(globalconfig["fanyi"][uid]["name"])
+    swc = getsimpleswitch(
+        globalconfig["fanyi"][uid],
+        "use",
+        callback=functools.partial(gobject.baseobject.prepare, uid),
+    )
+    color = getcolorbutton(
+        globalconfig["fanyi"][uid],
+        "color",
+        parent=self,
+        name="fanyicolor_" + uid,
+        callback=functools.partial(
+            selectcolor,
+            self,
+            globalconfig["fanyi"][uid],
+            "color",
+            None,
+            self,
+            "fanyicolor_" + uid,
+        ),
+    )
+
+    # Entries are packed three per row, five grid columns apart; every third
+    # entry re-places the "+" and "?" buttons below the current row.
+    if len(countnum) % 3 == 0:
+        layout.addWidget(btn, layout.rowCount(), 0, 1, 1)
+        layout.addWidget(self.btnquestion, layout.rowCount() - 1, 1, 1, 1)
+    offset = 5 * (len(countnum) % 3)
+    layout.addWidget(name, layout.rowCount() - 2, offset + 0)
+    layout.addWidget(swc, layout.rowCount() - 2, offset + 1)
+    layout.addWidget(color, layout.rowCount() - 2, offset + 2)
+    layout.addWidget(last, layout.rowCount() - 2, offset + 3)
+    if len(countnum) % 3 != 2:
+        layout.addWidget(QLabel(), layout.rowCount() - 2, offset + 4)
+
+    countnum.append(0)
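A worked example of the layout arithmetic, assuming the clone callback fires once per click:

```python
# Offsets produced as countnum grows by one per clone (illustration only).
for n in range(4):
    new_row = n % 3 == 0        # every third clone starts a fresh row
    offset = 5 * (n % 3)        # entry begins at column 0, 5, or 10
    print(n, new_row, offset)   # -> (0, True, 0) (1, False, 5) (2, False, 10) (3, True, 0)
```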
+
+
+def btnpluscallback(self, countnum, btn):
+    getalistname(self, functools.partial(selectllmcallback, self, countnum, btn))
+
+
+def createbtn(self, countnum):
+    btn = QPushButton(self)
+    btn.setIcon(qtawesome.icon("fa.plus"))
+    btn.clicked.connect(functools.partial(btnpluscallback, self, countnum, btn))
+    return btn
+
+
+def createbtnquest(self):
+    btn = QPushButton(self)
+    btn.setIcon(qtawesome.icon("fa.question"))
+    btn.clicked.connect(
+        lambda: os.startfile(dynamiclink("{docs_server}/#/zh/guochandamoxing"))
+    )
+    self.btnquestion = btn
+    return btn
+
+
+def initsome11(self, l, label=None, btnplus=False):
     grids = []
     if label:
         grids.append([(label, 8)])
     i = 0
     line = []
+    countnum = []
     for fanyi in l:

         _f = "./Lunatranslator/translator/{}.py".format(fanyi)
         if not os.path.exists(_f):
             continue
         i += 1

+        countnum.append(0)
         if fanyi in translatorsetting:

             items = autoinitdialog_items(translatorsetting[fanyi])
@@ -92,6 +262,43 @@ def initsome11(self, l, label=None):
         line += [""]
     if len(line):
         grids.append(line)
+    if btnplus:
+        grids.append(
+            [
+                (functools.partial(createbtn, self, countnum), 1),
+                (functools.partial(createbtnquest, self), 1),
+            ]
+        )

     return grids
+
+
+def initsome2(self, l, label=None):
+    is_gpt_likes, not_is_gpt_like = splitapillm(l)
+    not_is_gpt_like = initsome11(self, not_is_gpt_like, label)
+    is_gpt_likes = initsome11(self, is_gpt_likes, label, btnplus=True)
+    grids = [
+        [
+            (
+                dict(type="grid", title="传统", grid=not_is_gpt_like),  # "traditional"
+                0,
+                "group",
+            )
+        ],
+        [
+            (
+                dict(
+                    type="grid",
+                    title="大模型",  # "large language models"
+                    grid=is_gpt_likes,
+                    internallayoutname="damoxinggridinternal",
+                    parent=self,
+                ),
+                0,
+                "group",
+            )
+        ],
+    ]
+    return grids

@@ -264,7 +471,7 @@ def setTabTwo_lazy(self, basel):
     offlinegrid = initsome11(self, lixians)
     onlinegrid = initsome11(self, mianfei)
     developgrid += initsome11(self, develop)
-    online_reg_grid += initsome11(self, shoufei)
+    online_reg_grid += initsome2(self, shoufei)
     pretransgrid += initsome11(self, pre)
     vw, vl = getvboxwidget()
     basel.addWidget(vw)
@@ -1440,6 +1440,7 @@ def makegroupingrid(args):
     parent = args.get("parent", None)
     groupname = args.get("name", None)
     enable = args.get("enable", True)
+    internallayoutname = args.get("internallayoutname", None)
     group = LGroupBox()

     if not enable:
@@ -1457,10 +1458,14 @@ def makegroupingrid(args):
         grid = QGridLayout()
         group.setLayout(grid)
         automakegrid(grid, lis)
+        if internallayoutname:
+            setattr(parent, internallayoutname, grid)
     elif _type == "form":
         lay = LFormLayout()
         group.setLayout(lay)
         makeforms(lay, lis, args)
+        if internallayoutname:
+            setattr(parent, internallayoutname, lay)
     return group
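The new `internallayoutname` argument is what ties this widget helper to the clone feature: the group's inner layout is published on the parent via `setattr`, which is where the `self.damoxinggridinternal` used by `selectllmcallback` comes from. The pattern in isolation, with stand-in objects:

```python
# Minimal sketch of the setattr publication pattern (stand-ins, not Qt objects).
class Parent:
    pass

parent, grid = Parent(), object()  # stand-ins for the page widget and its QGridLayout
internallayoutname = "damoxinggridinternal"
if internallayoutname:
    setattr(parent, internallayoutname, grid)
assert parent.damoxinggridinternal is grid
```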
LunaTranslator/LunaTranslator/translator/baiduqianfan.py (new file, 132 lines)
@@ -0,0 +1,132 @@
from translator.basetranslator import basetrans
import json, requests
from traceback import print_exc


class TS(basetrans):

    def langmap(self):
        return {
            "zh": "Simplified Chinese",
            "ja": "Japanese",
            "en": "English",
            "ru": "Russian",
            "es": "Spanish",
            "ko": "Korean",
            "fr": "French",
            "cht": "Traditional Chinese",
            "vi": "Vietnamese",
            "tr": "Turkish",
            "pl": "Polish",
            "uk": "Ukrainian",
            "it": "Italian",
            "ar": "Arabic",
            "th": "Thai",
        }

    def __init__(self, typename):
        self.context = []
        self.access = {}
        super().__init__(typename)

    def createdata(self, message):
        try:
            temperature = float(self.config["Temperature"])
        except:
            temperature = 0.3

        if self.config["use_user_prompt"]:
            system = self.config["user_prompt"]
        else:
            system = "You are a translator. Please help me translate the following {} text into {}, and you should only tell me the translation.".format(
                self.srclang, self.tgtlang
            )
        data = dict(
            system=system,
            model=self.config["model"],
            messages=message,
            # optional
            max_tokens=self.config["max_tokens"],
            n=1,
            # stop=None,
            top_p=self.config["top_p"],
            temperature=temperature,
            frequency_penalty=self.config["frequency_penalty"],
            stream=self.config["usingstream"],
        )
        return data
    def commonparseresponse(self, query, response: requests.ResponseBase, usingstream):
        if usingstream:
            message = ""
            for chunk in response.iter_lines():
                response_data = chunk.decode("utf-8").strip()
                if not response_data:
                    continue
                try:
                    # Each SSE line is 'data: {...}'; [6:] strips the "data: " prefix.
                    json_data = json.loads(response_data[6:])
                    msg = json_data["result"].replace("\n\n", "\n").strip()
                    yield msg
                    message += msg

                except:
                    print_exc()
                    raise Exception(response_data)
        else:
            try:
                message = response.json()["result"].replace("\n\n", "\n").strip()
                yield message
            except:
                raise Exception(response.text)
        self.context.append({"role": "user", "content": query})
        self.context.append({"role": "assistant", "content": message})

    def get_access_token(self, API_KEY, SECRET_KEY):
        url = "https://aip.baidubce.com/oauth/2.0/token"
        params = {
            "grant_type": "client_credentials",
            "client_id": API_KEY,
            "client_secret": SECRET_KEY,
        }
        js = self.proxysession.post(url, params=params).json()

        try:
            return js["access_token"]
        except:
            raise Exception(js)
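This is the standard OAuth2 client-credentials exchange. A standalone sketch with the plain `requests` library (the commit routes it through the project's `proxysession` wrapper instead; the keys are placeholders):

```python
import requests

params = {
    "grant_type": "client_credentials",
    "client_id": "MY_API_KEY",         # placeholder
    "client_secret": "MY_SECRET_KEY",  # placeholder
}
js = requests.post("https://aip.baidubce.com/oauth/2.0/token", params=params).json()
token = js.get("access_token")  # missing on error, hence the raise above
```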

    def checkchange(self):
        self.checkempty(["model", "SECRET_KEY", "API_KEY"])
        SECRET_KEY, API_KEY = (
            self.multiapikeycurrent["SECRET_KEY"],
            self.multiapikeycurrent["API_KEY"],
        )
        if not self.access.get((API_KEY, SECRET_KEY)):
            acss = self.get_access_token(API_KEY, SECRET_KEY)
            self.access[(API_KEY, SECRET_KEY)] = acss
        return self.access[(API_KEY, SECRET_KEY)]
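`checkchange` memoizes tokens in a dict keyed by the `(API_KEY, SECRET_KEY)` tuple, so rotating between several key pairs never re-fetches a token it already holds. The caching shape, reduced to its essentials:

```python
# Sketch of the per-key-pair cache; fetch() stands in for get_access_token().
access = {}

def token_for(api_key, secret_key, fetch):
    if not access.get((api_key, secret_key)):
        access[(api_key, secret_key)] = fetch(api_key, secret_key)
    return access[(api_key, secret_key)]
```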

    def translate(self, query):
        acss = self.checkchange()
        self.contextnum = int(self.config["context_num"])

        message = []
        for _i in range(min(len(self.context) // 2, self.contextnum)):
            i = (
                len(self.context) // 2
                - min(len(self.context) // 2, self.contextnum)
                + _i
            )
            message.append(self.context[i * 2])
            message.append(self.context[i * 2 + 1])
        message.append({"role": "user", "content": query})

        usingstream = self.config["usingstream"]
        url = f"https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/{self.config['model']}?access_token={acss}"

        response = self.proxysession.post(
            url,
            json=self.createdata(message),
            stream=usingstream,
        )
        return self.commonparseresponse(query, response, usingstream)
@@ -69,52 +69,6 @@ class gptcommon(basetrans):
             api_url += "/v1"
         return api_url

-    def alicreateheaders(self):
-        h = self.createheaders()
-        if self.config["流式输出"]:
-            h.update({"Accept": "text/event-stream"})
-        return h
-
-    def alicreatedata(self, message):
-
-        data = dict(model=self.config["model"], input=dict(messages=message))
-        if self.config["流式输出"]:
-            data.update(dict(parameters=dict(incremental_output=True)))
-
-        return data
-
-    def aliparseresponse(self, query, response: requests.ResponseBase, usingstream):
-        if usingstream:
-            message = ""
-            for chunk in response.iter_lines():
-                response_data = chunk.decode("utf-8").strip()
-                if not response_data:
-                    continue
-                if response_data.startswith("data:") == False:
-                    continue
-                try:
-                    json_data = json.loads(response_data[5:])
-                    msg = json_data["output"]["text"]
-                    yield msg
-                    message += msg
-                except:
-                    print_exc()
-                    raise Exception(response_data)
-
-                if json_data["output"]["finish_reason"] == "stop":
-                    break
-
-        else:
-            try:
-                message = (
-                    response.json()["output"]["text"].replace("\n\n", "\n").strip()
-                )
-                yield message
-            except:
-                raise Exception(response.text)
-        self.context.append({"role": "user", "content": query})
-        self.context.append({"role": "assistant", "content": message})

     def commonparseresponse(self, query, response: requests.ResponseBase, usingstream):
         if usingstream:
             message = ""
@@ -175,24 +129,15 @@ class gptcommon(basetrans):

         usingstream = self.config["流式输出"]  # "streaming output"
         url = self.config["API接口地址"]  # "API endpoint"
-        parseresponse = self.commonparseresponse
-        createheaders = self.createheaders
-        createdata = self.createdata
         if url.endswith("/chat/completions"):
             pass
-        elif url.endswith("/text-generation/generation") or 'dashscope.aliyuncs.com' in url:
-            # https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation
-            # 阿里云百炼 (Aliyun Bailian)
-            parseresponse = self.aliparseresponse
-            createheaders = self.alicreateheaders
-            createdata = self.alicreatedata
         else:
             url = self.checkv1(url) + "/chat/completions"
         response = self.proxysession.post(
             url,
-            headers=createheaders(),
+            headers=self.createheaders(),
             params=self.createparam(),
-            json=createdata(message),
+            json=self.createdata(message),
             stream=usingstream,
         )
-        return parseresponse(query, response, usingstream)
+        return self.commonparseresponse(query, response, usingstream)

LunaTranslator/LunaTranslator/translator/txhunyuan.py (new file, 185 lines)
@@ -0,0 +1,185 @@
from translator.basetranslator import basetrans
import json

from datetime import datetime
import hashlib, sys, hmac, time, json


def sign_tc3(secret_key, date, service, str2sign):
    def _hmac_sha256(key, msg):
        return hmac.new(key, msg.encode("utf-8"), hashlib.sha256)

    def _get_signature_key(key, date, service):
        k_date = _hmac_sha256(("TC3" + key).encode("utf-8"), date)
        k_service = _hmac_sha256(k_date.digest(), service)
        k_signing = _hmac_sha256(k_service.digest(), "tc3_request")
        return k_signing.digest()

    signing_key = _get_signature_key(secret_key, date, service)
    signature = _hmac_sha256(signing_key, str2sign).hexdigest()
    return signature


def _get_tc3_signature(
    header, method, canonical_uri, payload, secret_key, date, service, options=None
):
    options = options or {}
    canonical_querystring = ""

    if sys.version_info[0] == 3 and isinstance(payload, type("")):
        payload = payload.encode("utf8")

    payload_hash = hashlib.sha256(payload).hexdigest()

    canonical_headers = "content-type:%s\nhost:%s\n" % (
        header["Content-Type"],
        header["Host"],
    )
    signed_headers = "content-type;host"
    canonical_request = "%s\n%s\n%s\n%s\n%s\n%s" % (
        method,
        canonical_uri,
        canonical_querystring,
        canonical_headers,
        signed_headers,
        payload_hash,
    )

    algorithm = "TC3-HMAC-SHA256"
    credential_scope = date + "/" + service + "/tc3_request"
    if sys.version_info[0] == 3:
        canonical_request = canonical_request.encode("utf8")
    digest = hashlib.sha256(canonical_request).hexdigest()
    string2sign = "%s\n%s\n%s\n%s" % (
        algorithm,
        header["X-TC-Timestamp"],
        credential_scope,
        digest,
    )

    return sign_tc3(secret_key, date, service, string2sign)
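This is Tencent Cloud's TC3-HMAC-SHA256 scheme: the signing key is derived by chaining HMACs over `date` → `service` → the literal `tc3_request`, and the derived key then signs the string-to-sign. A call with dummy inputs, just to show their shape:

```python
# Dummy values only; a real str2sign is built by _get_tc3_signature above.
sig = sign_tc3(
    secret_key="dummy-secret",
    date="2024-06-01",  # UTC date; must match the credential scope
    service="hunyuan",
    str2sign="TC3-HMAC-SHA256\n1717200000\n2024-06-01/hunyuan/tc3_request\n<sha256-hex>",
)
assert len(sig) == 64  # hex digest of SHA-256
```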


def _build_req_with_tc3_signature(key, _id, action, params, options=None):
    header = {}
    header["Content-Type"] = "application/json"

    endpoint = "hunyuan.tencentcloudapi.com"
    timestamp = int(time.time())
    header["Host"] = endpoint
    header["X-TC-Action"] = action[0].upper() + action[1:]
    header["X-TC-RequestClient"] = "SDK_PYTHON_3.0.1193"
    header["X-TC-Timestamp"] = str(timestamp)
    header["X-TC-Version"] = "2023-09-01"
    data = json.dumps(params).encode("utf8")

    service = "hunyuan"
    date = datetime.utcfromtimestamp(timestamp).strftime("%Y-%m-%d")
    signature = _get_tc3_signature(
        header, "POST", "/", data, key, date, service, options
    )

    auth = (
        "TC3-HMAC-SHA256 Credential=%s/%s/%s/tc3_request, SignedHeaders=content-type;host, Signature=%s"
        % (_id, date, service, signature)
    )
    header["Authorization"] = auth
    return header
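For reference, the Authorization value this produces has the fixed TC3 shape (dummy credentials shown):

```python
# Rendering of the header format string with hypothetical values.
_id, date, service, signature = "AKIDexample", "2024-06-01", "hunyuan", "9f2e" + "0" * 60
auth = (
    "TC3-HMAC-SHA256 Credential=%s/%s/%s/tc3_request, "
    "SignedHeaders=content-type;host, Signature=%s" % (_id, date, service, signature)
)
```

One thing worth double-checking against Tencent's docs: the call site in `translate` below passes `secret_id` as the first argument (`key`, which does the signing) and `secret_key` as `_id` (which lands in `Credential=`), whereas TC3 normally signs with the SecretKey and scopes with the SecretId.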


class TS(basetrans):

    def langmap(self):
        return {
            "zh": "Simplified Chinese",
            "ja": "Japanese",
            "en": "English",
            "ru": "Russian",
            "es": "Spanish",
            "ko": "Korean",
            "fr": "French",
            "cht": "Traditional Chinese",
            "vi": "Vietnamese",
            "tr": "Turkish",
            "pl": "Polish",
            "uk": "Ukrainian",
            "it": "Italian",
            "ar": "Arabic",
            "th": "Thai",
        }

    def __init__(self, typename):
        self.context = []
        super().__init__(typename)

    def translate(self, query):
        self.checkempty(["secret_id", "secret_key"])
        self.contextnum = int(self.config["context_num"])

        if self.config["use_user_prompt"]:
            message = [{"Role": "user", "Content": self.config["user_prompt"]}]
        else:
            message = [
                {
                    "Role": "system",
                    "Content": "You are a translator. Please help me translate the following {} text into {}, and you should only tell me the translation.".format(
                        self.srclang, self.tgtlang
                    ),
                },
            ]

        for _i in range(min(len(self.context) // 2, self.contextnum)):
            i = (
                len(self.context) // 2
                - min(len(self.context) // 2, self.contextnum)
                + _i
            )
            message.append(self.context[i * 2])
            message.append(self.context[i * 2 + 1])
        message.append({"Role": "user", "Content": query})
        usingstream = self.config["usingstream"]
        json_data = {
            "Model": self.config["model"],
            "Messages": message,
            "Stream": usingstream,
            "TopP": self.config["top_p"],
            "Temperature": self.config["Temperature"],
        }
        headers = _build_req_with_tc3_signature(
            self.multiapikeycurrent["secret_id"],
            self.multiapikeycurrent["secret_key"],
            "ChatCompletions",
            json_data,
        )
        response = self.proxysession.post(
            "https://hunyuan.tencentcloudapi.com/",
            headers=headers,
            data=json.dumps(json_data),
            stream=usingstream,
        )

        if usingstream:
            message = ""
            for i in response.iter_lines():

                if not i:
                    continue
                i = i.decode("utf8")[6:]  # strip the SSE "data: " prefix
                try:
                    mes = json.loads(i)["Choices"][0]["Delta"]["Content"]
                except:
                    raise Exception(i)
                yield mes
                message += mes

        else:
            try:
                message = response.json()["Response"]["Choices"][0]["Message"][
                    "Content"
                ]
                yield message
            except:
                raise Exception(response.text)

        self.context.append({"Role": "user", "Content": query})
        self.context.append({"Role": "assistant", "Content": message})
@@ -1732,6 +1732,20 @@
         "color": "blue",
         "name": "腾讯api"
     },
+    "txhunyuan": {
+        "type": "api",
+        "use": false,
+        "color": "blue",
+        "name": "腾讯混元大模型",
+        "is_gpt_like": true
+    },
+    "baiduqianfan": {
+        "type": "api",
+        "use": false,
+        "color": "blue",
+        "name": "百度千帆大模型",
+        "is_gpt_like": true
+    },
     "yandexapi": {
         "type": "api",
         "use": false,
@@ -263,6 +263,51 @@
             }
         }
     },
+    "txhunyuan": {
+        "args": {
+            "secret_id": "",
+            "secret_key": "",
+            "Temperature": 0.3,
+            "top_p": 0.3,
+            "model": "hunyuan-lite",
+            "context_num": 0,
+            "use_user_prompt": false,
+            "user_prompt": "",
+            "usingstream": false
+        },
+        "argstype": {
+            "top_p": {
+                "type": "spin",
+                "min": 0,
+                "max": 1,
+                "step": 0.01
+            },
+            "usingstream": {
+                "name": "流式输出",
+                "type": "switch"
+            },
+            "user_prompt": {
+                "name": "自定义promt"
+            },
+            "use_user_prompt": {
+                "name": "使用自定义promt",
+                "type": "switch"
+            },
+            "context_num": {
+                "name": "附带上下文个数",
+                "type": "intspin",
+                "min": 0,
+                "max": 10,
+                "step": 1
+            },
+            "Temperature": {
+                "type": "spin",
+                "min": 0,
+                "max": 1,
+                "step": 0.1
+            }
+        }
+    },
     "claude": {
         "args": {
             "BASE_URL": "https://api.anthropic.com",
@@ -347,9 +392,64 @@
                "max": 10,
                "step": 1
            },
-           "使用说明": {
-               "type": "label",
-               "islink": false
-           }
+           "Temperature": {
+               "type": "spin",
+               "min": 0,
+               "max": 1,
+               "step": 0.1
+           }
        }
    },
+   "baiduqianfan": {
+       "args": {
+           "model": "ernie-3.5-8k-0329",
+           "context_num": 0,
+           "API_KEY": "",
+           "SECRET_KEY": "",
+           "use_user_prompt": false,
+           "user_prompt": "",
+           "usingstream": false,
+           "Temperature": 0.3,
+           "top_p": 0.3,
+           "max_tokens": 128,
+           "frequency_penalty": 0
+       },
+       "argstype": {
+           "top_p": {
+               "type": "spin",
+               "min": 0,
+               "max": 1,
+               "step": 0.01
+           },
+           "frequency_penalty": {
+               "type": "spin",
+               "min": 0,
+               "max": 2,
+               "step": 0.05
+           },
+           "max_tokens": {
+               "type": "intspin",
+               "min": 1,
+               "max": 4096,
+               "step": 1
+           },
+           "user_prompt": {
+               "name": "自定义promt"
+           },
+           "usingstream": {
+               "name": "流式输出",
+               "type": "switch"
+           },
+           "use_user_prompt": {
+               "name": "使用自定义promt",
+               "type": "switch"
+           },
+           "context_num": {
+               "name": "附带上下文个数",
+               "type": "intspin",
+               "min": 0,
+               "max": 10,
+               "step": 1
+           },
+           "Temperature": {
+               "type": "spin",
@@ -382,10 +482,6 @@
            "max": 10,
            "step": 1
        },
-       "使用说明": {
-           "type": "label",
-           "islink": false
-       },
        "Temperature": {
            "type": "spin",
            "min": 0,
@@ -661,10 +757,6 @@
            "type": "label",
            "islink": true
        },
-       "使用说明": {
-           "type": "label",
-           "islink": false
-       },
        "API超时(秒)": {
            "type": "intspin",
            "min": 30,
@@ -823,5 +823,9 @@
     "总是检测": "دائما الكشف",
     "动态检测": "دينامية اختبار",
     "从不检测": "لم تكتشف",
-    "找不到文本?": "لا يمكن العثور على النص ؟"
+    "找不到文本?": "لا يمكن العثور على النص ؟",
+    "腾讯混元大模型": "تينسنت الهجين نموذج كبير",
+    "传统": "تقليدي",
+    "大模型": "نموذج كبير",
+    "百度千帆大模型": "نموذج بايدو"
 }

@@ -823,5 +823,9 @@
     "总是检测": "總是檢測",
     "动态检测": "動態檢測",
     "从不检测": "從不檢測",
-    "找不到文本?": "找不到文字?"
+    "找不到文本?": "找不到文字?",
+    "腾讯混元大模型": "騰訊混元大模型",
+    "传统": "傳統",
+    "大模型": "大模型",
+    "百度千帆大模型": "百度千帆大模型"
 }

@@ -823,5 +823,9 @@
     "总是检测": "Always checking",
     "动态检测": "Dynamic detection",
     "从不检测": "Never detect",
-    "找不到文本?": "Can't find text?"
+    "找不到文本?": "Can't find text?",
+    "腾讯混元大模型": "Tencent's hybrid big model",
+    "传统": "tradition",
+    "大模型": "Large model",
+    "百度千帆大模型": "Baidu Qianfan Large Model"
 }

@@ -823,5 +823,9 @@
     "总是检测": "Siempre detectado",
     "动态检测": "Detección dinámica",
     "从不检测": "Nunca detectar",
-    "找不到文本?": "¿No se puede encontrar el texto?"
+    "找不到文本?": "¿No se puede encontrar el texto?",
+    "腾讯混元大模型": "El gran modelo híbrido de Tencent",
+    "传统": "Tradición",
+    "大模型": "Gran modelo",
+    "百度千帆大模型": "Modelo Baidu qianfan"
 }

@@ -823,5 +823,9 @@
     "总是检测": "Toujours détecter",
     "动态检测": "Détection dynamique",
     "从不检测": "Jamais détecté",
-    "找不到文本?": "Vous ne trouvez pas le texte?"
+    "找不到文本?": "Vous ne trouvez pas le texte?",
+    "腾讯混元大模型": "Tencent hybride grand modèle",
+    "传统": "La tradition",
+    "大模型": "Le grand modèle",
+    "百度千帆大模型": "Baidu mille voiles modèle"
 }

@@ -823,5 +823,9 @@
     "总是检测": "Controllo sempre",
     "动态检测": "Rilevamento dinamico",
     "从不检测": "Non rilevare mai",
-    "找不到文本?": "Non trovi il messaggio?"
+    "找不到文本?": "Non trovi il messaggio?",
+    "腾讯混元大模型": "Il grande modello ibrido di Tencent",
+    "传统": "tradizione",
+    "大模型": "Modello grande",
+    "百度千帆大模型": "Baidu Qianfan Modello grande"
 }

@@ -823,5 +823,9 @@
     "总是检测": "常に検出",
     "动态检测": "どうてきけんしゅつ",
     "从不检测": "検出しない",
-    "找不到文本?": "テキストが見つかりませんか?"
+    "找不到文本?": "テキストが見つかりませんか?",
+    "腾讯混元大模型": "テンセント混元大モデル",
+    "传统": "従来の",
+    "大模型": "ビッグモデル",
+    "百度千帆大模型": "百度千帆大模型"
 }

@@ -823,5 +823,9 @@
     "总是检测": "항상 테스트",
     "动态检测": "동적 체크",
     "从不检测": "테스트하지 않음",
-    "找不到文本?": "텍스트를 찾을 수 없습니까?"
+    "找不到文本?": "텍스트를 찾을 수 없습니까?",
+    "腾讯混元大模型": "텐센트 혼원대모델",
+    "传统": "전통",
+    "大模型": "대형 모델",
+    "百度千帆大模型": "바이두 천범대 모형"
 }

@@ -823,5 +823,9 @@
     "总是检测": "Zawsze sprawdzam",
     "动态检测": "Wykrywanie dynamiczne",
     "从不检测": "Nigdy nie wykrywaj",
-    "找不到文本?": "Nie możesz znaleźć smsa?"
+    "找不到文本?": "Nie możesz znaleźć smsa?",
+    "腾讯混元大模型": "Hybrydowy duży model Tencent",
+    "传统": "tradycja",
+    "大模型": "Duży model",
+    "百度千帆大模型": "Duży model Baidu Qianfan"
 }

@@ -823,5 +823,9 @@
     "总是检测": "Всегда проверяйте",
     "动态检测": "Динамический контроль",
     "从不检测": "Никогда не проверяю.",
-    "找不到文本?": "Не удалось найти текст?"
+    "找不到文本?": "Не удалось найти текст?",
+    "腾讯混元大模型": "Большая гибридная модель.",
+    "传统": "Традиции",
+    "大模型": "Большая модель",
+    "百度千帆大模型": "Сто тысяч парусов."
 }

@@ -823,5 +823,9 @@
     "总是检测": "ตรวจจับเสมอ",
     "动态检测": "การตรวจจับแบบไดนามิก",
     "从不检测": "ไม่เคยตรวจจับ",
-    "找不到文本?": "ไม่พบข้อความ?"
+    "找不到文本?": "ไม่พบข้อความ?",
+    "腾讯混元大模型": "Tencent Mixed หยวนรุ่นใหญ่",
+    "传统": "ประเพณี",
+    "大模型": "โมเดลขนาดใหญ่",
+    "百度千帆大模型": "Baidu Qianfan โมเดลขนาดใหญ่"
 }

@@ -823,5 +823,9 @@
     "总是检测": "Her zaman kontrol ediyor",
     "动态检测": "Dinamik tanıma",
     "从不检测": "Asla tanıma",
-    "找不到文本?": "Mesaj bulamıyor musun?"
+    "找不到文本?": "Mesaj bulamıyor musun?",
+    "腾讯混元大模型": "Tencent'in hibrid büyük modeli",
+    "传统": "gelenek",
+    "大模型": "Büyük model",
+    "百度千帆大模型": "Baidu Qianfan Büyük Modeli"
 }

@@ -823,5 +823,9 @@
     "总是检测": "Завжди перевіряється",
     "动态检测": "Динамічне виявлення",
     "从不检测": "Ніколи не визначити",
-    "找不到文本?": "Не вдалося знайти текст?"
+    "找不到文本?": "Не вдалося знайти текст?",
+    "腾讯混元大模型": "Велика гібридна модель",
+    "传统": "традиція",
+    "大模型": "Велика модель",
+    "百度千帆大模型": "Великий модель Baidu Qianfan"
 }

@@ -823,5 +823,9 @@
     "总是检测": "Luôn phát hiện",
     "动态检测": "Phát hiện động",
     "从不检测": "Không bao giờ phát hiện",
-    "找不到文本?": "Không tìm thấy văn bản?"
+    "找不到文本?": "Không tìm thấy văn bản?",
+    "腾讯混元大模型": "Mô hình hỗn hợp lớn",
+    "传统": "Truyền thống",
+    "大模型": "Mô hình lớn",
+    "百度千帆大模型": "Mô hình 100.000 cánh buồm"
 }

@@ -823,5 +823,9 @@
     "无缩放": "",
     "总是检测": "",
     "动态检测": "",
-    "从不检测": ""
+    "从不检测": "",
+    "腾讯混元大模型": "",
+    "传统": "",
+    "大模型": "",
+    "百度千帆大模型": ""
 }

@@ -139,4 +139,17 @@ main {
 main .markdown-section {
     display: none;
 }
 */
+
+.alert {
+    padding: 10px;
+    background-color: #ff9;
+    border-left: 6px solid #ff0;
+    margin-bottom: 10px;
+    color: black;
+}
+
+.alert-warning {
+    background-color: #f0ad4e;
+    border-color: #ec971f;
+}
@@ -1,7 +1,20 @@
-## 国产大模型如何使用ChatGPT兼容接口
+## 如何使用国产大模型API接口
+
+## ChatGPT兼容接口

 ### DeepSeek

 **API接口地址** `https://api.deepseek.com`

+### 阿里云百炼大模型
+
+**API接口地址** `https://dashscope.aliyuncs.com/compatible-mode/v1`
+
 ### 字节跳动豆包大模型等

 **API接口地址** `https://ark.cn-beijing.volces.com/api/v3`

 **SECRET_KEY** 跟[官方文档](https://www.volcengine.com/docs/82379/1263279)获取
@@ -10,8 +23,21 @@

 ![img](https://image.lunatranslator.xyz/zh/damoxing/doubao.png)

+## 不兼容的专用接口
+
-### 阿里云百炼大模型
+### 腾讯混元大模型

-**API接口地址** `https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation`
+略
+
+### 百度千帆大模型
+
+<div class="alert alert-warning" role="alert">
+<strong>警告:</strong> 这个模型好像只支持中英翻译,不支持日文
+</div>
+
+**model** 应填写百度接口文档中的**请求地址**的尾部,例如:
+
+![img](https://image.lunatranslator.xyz/zh/damoxing/qianfan1.png)
+
+![img](https://image.lunatranslator.xyz/zh/damoxing/qianfan2.png)
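For instance, using the commit's own default: if Baidu's console lists a request URL such as `https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-3.5-8k-0329`, then **model** is its final path segment, `ernie-3.5-8k-0329` — exactly the segment `baiduqianfan.py` interpolates into `.../wenxinworkshop/chat/{model}?access_token=...`.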

@@ -3,7 +3,7 @@
 - [获取软件和启动](/zh/start.md)
 - [软件更新](/zh/update.md)
 - [常见问题](/zh/qas.md)
-- [国产大模型如何使用ChatGPT兼容接口](/zh/guochandamoxing.md)
+- [如何使用国产大模型API接口](/zh/guochandamoxing.md)
 - [mangaocr 整合包无法启动怎么办](/zh/mangaocr.md)
 - [OCR 自动化执行方法的参数含义](/zh/ocrparam.md)
 - [Windows OCR 如何安装额外的语言支持](/zh/windowsocr.md)

@@ -28,8 +28,8 @@ set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/version)
 include(generate_product_version)

 set(VERSION_MAJOR 5)
-set(VERSION_MINOR 13)
-set(VERSION_PATCH 4)
+set(VERSION_MINOR 15)
+set(VERSION_PATCH 0)

 add_library(pch pch.cpp)
 target_precompile_headers(pch PUBLIC pch.h)