恍兮惚兮 2024-11-29 22:58:22 +08:00
parent 97c6999f9b
commit 269168d2e7
42 changed files with 516 additions and 1157 deletions

View File

@@ -1,7 +1,7 @@
set(VERSION_MAJOR 6)
set(VERSION_MINOR 7)
set(VERSION_PATCH 2)
set(VERSION_MINOR 8)
set(VERSION_PATCH 0)
set(VERSION_REVISION 0)
set(LUNA_VERSION "{${VERSION_MAJOR},${VERSION_MINOR},${VERSION_PATCH},${VERSION_REVISION}}")
add_library(VERSION_DEF ${CMAKE_CURRENT_LIST_DIR}/version_def.cpp)

View File

@@ -239,24 +239,34 @@ DECLARE_API void *add_ContextMenuRequested(void *m_host, int index, const wchar_
CHECK_FAILURE(items->get_Count(&itemsCount));
// Adding a custom context menu item for the page that will display the page's URI.
wil::com_ptr<ICoreWebView2ContextMenuItem> newMenuItem;
CHECK_FAILURE(webviewEnvironment_5->CreateContextMenuItem(
data->label.c_str(),
nullptr,
COREWEBVIEW2_CONTEXT_MENU_ITEM_KIND_COMMAND, &newMenuItem));
newMenuItem->add_CustomItemSelected(
Callback<ICoreWebView2CustomItemSelectedEventHandler>(
[=](
ICoreWebView2ContextMenuItem *sender,
IUnknown *args)
{
LPWSTR selecttext;
CHECK_FAILURE(target->get_SelectionText(&selecttext));
callback(selecttext);
// No need to free; freeing it actually causes a crash.
return S_OK;
})
.Get(),
nullptr);
if (data->label.size())
{
CHECK_FAILURE(webviewEnvironment_5->CreateContextMenuItem(
data->label.c_str(),
nullptr,
COREWEBVIEW2_CONTEXT_MENU_ITEM_KIND_COMMAND, &newMenuItem));
newMenuItem->add_CustomItemSelected(
Callback<ICoreWebView2CustomItemSelectedEventHandler>(
[=](
ICoreWebView2ContextMenuItem *sender,
IUnknown *args)
{
LPWSTR selecttext;
CHECK_FAILURE(target->get_SelectionText(&selecttext));
callback(selecttext);
// No need to free; freeing it actually causes a crash.
return S_OK;
})
.Get(),
nullptr);
}
else
{
CHECK_FAILURE(webviewEnvironment_5->CreateContextMenuItem(
L"",
nullptr,
COREWEBVIEW2_CONTEXT_MENU_ITEM_KIND_SEPARATOR, &newMenuItem));
}
UINT idx;
if (index == -1)
idx = itemsCount;
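
The rewritten block above changes the add_ContextMenuRequested contract: a non-empty label still creates a COREWEBVIEW2_CONTEXT_MENU_ITEM_KIND_COMMAND entry with a CustomItemSelected callback, while an empty label now yields a COREWEBVIEW2_CONTEXT_MENU_ITEM_KIND_SEPARATOR. A minimal Python sketch of that dispatch, with a hypothetical build_menu_items helper standing in for the COM calls:

def build_menu_items(specs):
    # specs: list of (label, callback); an empty label requests a separator,
    # mirroring the C++ branch above.
    items = []
    for label, callback in specs:
        if label:
            items.append({"kind": "COMMAND", "label": label, "callback": callback})
        else:
            items.append({"kind": "SEPARATOR"})
    return items

menu = build_menu_items([("查词", print), ("", None), ("翻译", print)])
assert menu[1]["kind"] == "SEPARATOR"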

View File

@@ -689,6 +689,13 @@ class MAINUI:
self.audioplayer.timestamp = uuid.uuid4()
reader.read(text2, force, self.audioplayer.timestamp)
@tryprint
def read_text(self, text):
if not self.reader:
return
self.audioplayer.timestamp = uuid.uuid4()
self.reader.read(text, True, self.audioplayer.timestamp)
def readcurrent(self, force=False, needresult=False):
if needresult:
text = self.ttsrepair(self.currentread, self.__usewhich())
@@ -941,19 +948,16 @@ class MAINUI:
)
@threader
def clickwordcallback(self, word, append):
def clickwordcallback(self, word):
if globalconfig["usewordorigin"] == False:
word = word["orig"]
else:
word = word.get("origorig", word["orig"])
if globalconfig["usecopyword"]:
if append:
winsharedutils.clipboard_set(winsharedutils.clipboard_get() + word)
else:
winsharedutils.clipboard_set(word)
winsharedutils.clipboard_set(word)
if globalconfig["usesearchword"]:
self.searchwordW.search_word.emit(word, append)
self.searchwordW.search_word.emit(word)
def __dontshowintaborsetbackdrop(self, widget):
window_flags = widget.windowFlags()
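
The new MAINUI.read_text above becomes the single TTS entry point: it stamps audioplayer with a fresh uuid4 before calling reader.read, the same pattern readcurrent uses, so audio queued under an older stamp can be discarded. Call sites elsewhere in this commit (the search window's sound button, the auto-TTS path, the context menus) now invoke gobject.baseobject.read_text instead of the removed tts_for_searched_word. A self-contained sketch of the timestamp trick, with stub reader and player objects:

import uuid

class StubPlayer:
    # plays a chunk only if its stamp still matches the current one
    timestamp = None
    def play(self, chunk, stamp):
        if stamp == self.timestamp:
            print("play:", chunk)

class StubReader:
    def __init__(self, player):
        self.player = player
    def read(self, text, force, stamp):
        self.player.play(text, stamp)

player = StubPlayer()
reader = StubReader(player)
player.timestamp = uuid.uuid4()      # read_text refreshes this on every call
reader.read("hello", True, player.timestamp)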

View File

@@ -0,0 +1,96 @@
import requests
from myutils.utils import urlpathjoin, createurl
from myutils.proxy import getproxy
from cishu.cishubase import cishubase
from myutils.commonbase import maybejson
from translator.gptcommon import qianfanIAM
def list_models(typename, regist):
resp = requests.get(
urlpathjoin(
createurl(regist["API接口地址"]().strip())[: -len("chat/completions")],
"models",
),
headers={
"Authorization": "Bearer " + regist["SECRET_KEY"]().split("|")[0].strip()
},
proxies=getproxy(("cishu", typename)),
)
try:
return sorted([_["id"] for _ in resp.json()["data"]])
except:
raise Exception(maybejson(resp))
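
list_models derives the /models endpoint from the configured chat URL: createurl is assumed to normalize the configured base to one ending in chat/completions, which list_models strips before joining models. A sketch of that derivation (function name hypothetical):

def models_url(chat_completions_url: str) -> str:
    # ".../v1/chat/completions" -> ".../v1/models"
    prefix = chat_completions_url[: -len("chat/completions")]
    return prefix + "models"

assert models_url("https://api.openai.com/v1/chat/completions") == (
    "https://api.openai.com/v1/models"
)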
class chatgptlike(cishubase):
def init(self):
self.maybeuse = {}
@property
def apiurl(self):
return self.config.get(
"API接口地址", self.config.get("OPENAI_API_BASE", "")
).strip()
def createdata(self, message):
temperature = self.config["Temperature"]
data = dict(
model=self.config["model"],
messages=message,
# optional
max_tokens=self.config["max_tokens"],
n=1,
# stop=None,
top_p=self.config["top_p"],
temperature=temperature,
)
if "api.mistral.ai" not in self.apiurl:
data.update(dict(frequency_penalty=self.config["frequency_penalty"]))
return data
def search(self, word):
query = self._gptlike_createquery(
word, "use_user_user_prompt", "user_user_prompt"
)
sysprompt = self._gptlike_createsys("使用自定义promt", "自定义promt")
message = [{"role": "system", "content": sysprompt}]
message.append({"role": "user", "content": query})
response = self.proxysession.post(
self.createurl(),
headers=self.createheaders(),
json=self.createdata(message),
)
return self.markdown_to_html(self.commonparseresponse(response))
def createheaders(self):
_ = {}
curkey = self.config["SECRET_KEY"]
if curkey:
# Some free-of-charge endpoints work without a key; supplying one actually triggers an error.
_.update({"Authorization": "Bearer " + curkey})
if "openai.azure.com/openai/deployments/" in self.apiurl:
_.update({"api-key": curkey})
elif ("qianfan.baidubce.com/v2" in self.apiurl) and (":" in curkey):
if not self.maybeuse.get(curkey):
Access_Key, Secret_Key = curkey.split(":")
key = qianfanIAM.getkey(Access_Key, Secret_Key, self.proxy)
self.maybeuse[curkey] = key
_.update({"Authorization": "Bearer " + self.maybeuse[curkey]})
return _
def createurl(self):
if "openai.azure.com/openai/deployments/" in self.apiurl:
return self.apiurl
return createurl(self.apiurl)
def commonparseresponse(self, response: requests.ResponseBase):
try:
message = (
response.json()["choices"][0]["message"]["content"]
.replace("\n\n", "\n")
.strip()
)
except:
raise Exception(response)
return message
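
createheaders above selects the auth scheme from the URL: Azure OpenAI deployments additionally get an api-key header, Baidu Qianfan v2 keys written as Access_Key:Secret_Key are exchanged once for an IAM bearer token through qianfanIAM.getkey and cached in self.maybeuse, and any other non-empty key is sent as a plain Bearer token. A condensed sketch with the token exchange stubbed out (get_iam_token is hypothetical):

def make_headers(apiurl, key, cache, get_iam_token=lambda ak, sk: "iam-token"):
    headers = {}
    if not key:
        return headers  # some free endpoints reject any Authorization header
    headers["Authorization"] = "Bearer " + key
    if "openai.azure.com/openai/deployments/" in apiurl:
        headers["api-key"] = key
    elif ("qianfan.baidubce.com/v2" in apiurl) and (":" in key):
        if key not in cache:
            ak, sk = key.split(":")
            cache[key] = get_iam_token(ak, sk)
        headers["Authorization"] = "Bearer " + cache[key]
    return headers

print(make_headers("https://api.openai.com/v1", "sk-xxx", {}))
# {'Authorization': 'Bearer sk-xxx'}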

View File

@@ -2,6 +2,15 @@ from myutils.config import globalconfig
from myutils.wrapper import threader
from traceback import print_exc
from myutils.proxy import getproxy
from myutils.utils import (
SafeFormatter,
createenglishlangmap,
getlangtgt,
getlangsrc,
create_langmap,
)
from myutils.commonbase import ArgsEmptyExc, proxysession
import re
class DictTree:
@@ -24,6 +33,7 @@ class cishubase:
def __init__(self, typename) -> None:
self.typename = typename
self.proxysession = proxysession("cishu", self.typename)
self.callback = print
self.needinit = True
try:
@@ -54,3 +64,80 @@ class cishubase:
@property
def config(self):
return globalconfig["cishu"][self.typename]["args"]
def _gptlike_createquery(self, query, usekey, tempk):
user_prompt = (
self.config.get(tempk, "") if self.config.get(usekey, False) else ""
)
fmt = SafeFormatter()
return fmt.format(user_prompt, must_exists="sentence", sentence=query)
def _gptlike_createsys(self, usekey, tempk):
fmt = SafeFormatter()
if self.config[usekey]:
template = self.config[tempk]
else:
template = "You are a professional dictionary assistant whose task is to help users search for information such as the meaning, pronunciation, etymology, synonyms, antonyms, and example sentences of {srclang} words. You should be able to handle queries in multiple languages and provide in-depth information or simple definitions according to user needs. You should reply in {tgtlang}."
tgt = getlangtgt()
src = getlangsrc()
langmap = create_langmap(createenglishlangmap())
tgtlang = langmap.get(tgt, tgt)
srclang = langmap.get(src, src)
return fmt.format(template, srclang=srclang, tgtlang=tgtlang)
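
SafeFormatter is imported from myutils.utils and not shown in this diff; from its use here it must substitute known placeholders while tolerating unknown ones, with must_exists requiring that the user template actually references {sentence}. One possible shape, offered purely as an assumption:

import string

class SafeFormatter(string.Formatter):
    # Assumed behavior: unknown {placeholders} are left intact, not raised on.
    def get_value(self, key, args, kwargs):
        if isinstance(key, str) and key not in kwargs:
            return "{" + key + "}"
        return super().get_value(key, args, kwargs)
    def format(self, template, must_exists=None, **kwargs):
        if must_exists and ("{%s}" % must_exists) not in template:
            template += "{%s}" % must_exists  # hypothetical fallback
        return self.vformat(template, (), kwargs)

fmt = SafeFormatter()
print(fmt.format("define {sentence} in {tgtlang}",
                 must_exists="sentence", sentence="猫"))
# -> define 猫 in {tgtlang}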
def checkempty(self, items):
emptys = []
for item in items:
if (self.config[item]) == "":
emptys.append(item)
if len(emptys):
emptys_s = []
argstype = self.config.get("argstype", {})
for e in emptys:
name = argstype.get(e, {}).get("name", e)
emptys_s.append(name)
raise ArgsEmptyExc(emptys_s)
def markdown_to_html(self, markdown_text: str):
print(markdown_text)
lines = markdown_text.split("\n")
html_lines = []
for line in lines:
if line.startswith("# "):
html_lines.append(f"<h1>{line[2:]}</h1>")
elif line.startswith("## "):
html_lines.append(f"<h2>{line[3:]}</h2>")
elif line.startswith("### "):
html_lines.append(f"<h3>{line[4:]}</h3>")
else:
def parsex(line):
line = re.sub(r"\*\*(.*?)\*\*", r"<strong>\1</strong>", line)
line = re.sub(r"\*(.*?)\*", r"<em>\1</em>", line)
return line
if line.startswith("- ") or line.startswith("* "):
html_lines.append(f"<li>{parsex(line[2:])}</li>")
else:
html_lines.append(f"<p>{parsex(line)}</p>")
final_html = []
in_list = False
for line in html_lines:
if line.startswith("<li>"):
if not in_list:
final_html.append("<ul>")
in_list = True
final_html.append(line)
elif in_list:
final_html.append("</ul>")
in_list = False
final_html.append(line)
else:
final_html.append(line)
if in_list:
final_html.append("</ul>")
return "".join(final_html)
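
markdown_to_html above is a deliberately small converter: #/##/### headings, **bold**, *em*, and -/* list items, with consecutive <li> lines wrapped in a single <ul>. Traced on a small input:

md = "# Title\n- **bold** item\n- second\nplain *em* text"
# markdown_to_html(md) returns:
#   <h1>Title</h1>
#   <ul><li><strong>bold</strong> item</li><li>second</li></ul>
#   <p>plain <em>em</em> text</p>
# (joined into one string with no separators)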

View File

@@ -0,0 +1,91 @@
import requests
from myutils.utils import urlpathjoin
from myutils.proxy import getproxy
from cishu.cishubase import cishubase
def list_models(typename, regist):
js = requests.get(
urlpathjoin(regist["BASE_URL"]().strip(), "v1beta/models"),
params={"key": regist["SECRET_KEY"]().split("|")[0].strip()},
proxies=getproxy(("fanyi", typename)),
).json()
try:
models = js["models"]
except:
raise Exception(js)
mm = []
for m in models:
name: str = m["name"]
supportedGenerationMethods: list = m["supportedGenerationMethods"]
if "generateContent" not in supportedGenerationMethods:
continue
if name.startswith("models/"):
name = name[7:]
mm.append(name)
return sorted(mm)
class gemini(cishubase):
def search(self, word):
self.checkempty(["SECRET_KEY", "model"])
api_key = self.config["SECRET_KEY"]
model = self.config["model"]
query = self._gptlike_createquery(
word, "use_user_user_prompt", "user_user_prompt"
)
safety = {
"safety_settings": [
{
"category": "HARM_CATEGORY_HARASSMENT",
"threshold": "BLOCK_NONE",
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"threshold": "BLOCK_NONE",
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"threshold": "BLOCK_NONE",
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"threshold": "BLOCK_NONE",
},
]
}
gen_config = {
"generationConfig": {
"stopSequences": [" \n"],
"temperature": self.config["Temperature"],
}
}
sysprompt = self._gptlike_createsys("use_custom_prompt", "custom_prompt")
sys_message = {"systemInstruction": {"parts": {"text": sysprompt}}}
contents = {"contents": [{"role": "user", "parts": [{"text": query}]}]}
payload = {}
payload.update(contents)
payload.update(safety)
payload.update(sys_message)
payload.update(gen_config)
# Set up the request headers and URL
headers = {"Content-Type": "application/json"}
# by default https://generativelanguage.googleapis.com/v1
# Send the request
response = self.proxysession.post(
urlpathjoin(
self.config["BASE_URL"],
"v1beta/models/{}:generateContent?key={}".format(model, api_key),
),
headers=headers,
json=payload,
)
try:
return self.markdown_to_html(
response.json()["candidates"][0]["content"]["parts"][0]["text"]
)
except Exception as e:
raise Exception(response) from e
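
The request above goes to the public generateContent REST endpoint, with the key passed as a query parameter rather than a header. A sketch of the assembled URL, assuming urlpathjoin joins segments with single slashes:

def gemini_endpoint(base_url, model, api_key):
    return "{}/v1beta/models/{}:generateContent?key={}".format(
        base_url.rstrip("/"), model, api_key
    )

print(gemini_endpoint("https://generativelanguage.googleapis.com",
                      "gemini-1.5-flash", "AIza..."))
# https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=AIza...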

View File

@@ -2,8 +2,12 @@ import functools, os
import gobject
from myutils.utils import splitocrtypes
from myutils.config import globalconfig, _TR, get_platform
from gui.inputdialog import multicolorset, autoinitdialog
from gui.inputdialog import autoinitdialog, autoinitdialog_items
from gui.inputdialog import (
multicolorset,
autoinitdialogx,
autoinitdialog_items,
autoinitdialog,
)
from gui.usefulwidget import (
yuitsu_switch,
makescrollgrid,
@@ -40,7 +44,7 @@ def gethiragrid(self):
items[-1]["callback"] = gobject.baseobject.starthira
_3 = D_getIconButton(
callback=functools.partial(
autoinitdialog,
autoinitdialogx,
self,
globalconfig["hirasetting"][name]["args"],
globalconfig["hirasetting"][name]["name"],
@@ -142,12 +146,14 @@ def initinternal(self, names):
line += [
D_getIconButton(
callback=functools.partial(
autoinitdialog,
autoinitdialogx,
self,
globalconfig["cishu"][cishu]["args"],
globalconfig["cishu"][cishu]["name"],
800,
items,
"cishu." + cishu,
cishu,
),
icon="fa.gear",
),

View File

@@ -846,7 +846,7 @@ class DynamicTreeModel(QStandardItemModel):
if not self.data(index, isWordNode):
return
gobject.baseobject.searchwordW.search_word.emit(
self.itemFromIndex(index).text(), False
self.itemFromIndex(index).text()
)
@@ -976,7 +976,7 @@ class showdiction(LMainWindow):
class searchwordW(closeashidewindow):
search_word = pyqtSignal(str, bool)
search_word = pyqtSignal(str)
show_dict_result = pyqtSignal(float, str, str)
search_word_in_new_window = pyqtSignal(str)
@@ -1106,7 +1106,9 @@ class searchwordW(closeashidewindow):
self.searchlayout.addWidget(searchbutton)
soundbutton = QPushButton(qtawesome.icon("fa.music"), "")
soundbutton.clicked.connect(self.tts_for_searched_word)
soundbutton.clicked.connect(
lambda: gobject.baseobject.read_text(self.searchtext.text())
)
soundbutton.setContextMenuPolicy(Qt.ContextMenuPolicy.CustomContextMenu)
soundbutton.customContextMenuRequested.connect(self.showmenu_auto_sound)
self.soundbutton = soundbutton
@@ -1141,12 +1143,12 @@ class searchwordW(closeashidewindow):
self.tabks = []
self.setCentralWidget(ww)
self.textOutput = auto_select_webview(self, True)
self.textOutput.add_menu(
0, _TR("查词"), lambda w: self.search_word.emit(w, False)
)
self.textOutput.add_menu(0, _TR("查词"), self.search_word.emit)
self.textOutput.add_menu(
1, _TR("在新窗口中查词"), threader(self.search_word_in_new_window.emit)
)
self.textOutput.add_menu(2, _TR("翻译"), gobject.baseobject.textgetmethod)
self.textOutput.add_menu(3, _TR("朗读"), gobject.baseobject.read_text)
self.textOutput.set_zoom(globalconfig["ZoomFactor"])
self.textOutput.on_ZoomFactorChanged.connect(
functools.partial(globalconfig.__setitem__, "ZoomFactor")
@@ -1181,13 +1183,6 @@ class searchwordW(closeashidewindow):
self.ankiwindow.hide()
self.isfirstshowanki = False
def tts_for_searched_word(self):
if gobject.baseobject.reader:
gobject.baseobject.audioplayer.timestamp = uuid.uuid4()
gobject.baseobject.reader.read(
self.searchtext.text(), True, gobject.baseobject.audioplayer.timestamp
)
def generate_dictionarys(self):
res = []
tabks = []
@@ -1206,13 +1201,11 @@ class searchwordW(closeashidewindow):
res.insert(idx, {"dict": k, "content": v})
return res
def __click_word_search_function(self, word, append):
def __click_word_search_function(self, word):
self.showNormal()
if self.state != 2:
return
word = word.strip()
if append:
word = self.searchtext.text() + word
self.searchtext.setText(word)
self.search(word)
@@ -1263,7 +1256,7 @@ class searchwordW(closeashidewindow):
if word == "":
return
if globalconfig["is_search_word_auto_tts"]:
self.tts_for_searched_word()
gobject.baseobject.read_text(self.searchtext.text())
self.ankiwindow.reset(word)
for i in range(self.tab.count()):
self.tab.removeTab(0)

View File

@@ -1304,6 +1304,8 @@ class WebivewWidget(abstractwebview):
t.timeout.emit()
t.start()
self.add_menu(0, "", lambda _: None)
def __darkstatechecker(self):
dl = globalconfig["darklight2"]
if dl == self.__darkstate:
@@ -1455,6 +1457,7 @@ class QWebWrap(abstractwebview):
class mshtmlWidget(abstractwebview):
CommandBase = 10086
def __del__(self):
if not self.browser:
return

View File

@@ -1,6 +1,6 @@
from myutils.proxy import getproxy
from myutils.utils import getlangtgt, getlangsrc, getlanguagespace
from myutils.config import _TR, static_data
from myutils.utils import getlangtgt, getlangsrc, getlanguagespace, create_langmap
from myutils.config import _TR
from myutils.wrapper import stripwrapper
import requests
@@ -95,15 +95,7 @@ class commonbase:
@property
def langmap_(self):
_ = dict(
zip(
[_["code"] for _ in static_data["lang_list_all"]],
[_["code"] for _ in static_data["lang_list_all"]],
)
)
_.update({"cht": "zh", "auto": "auto"})
_.update(self.langmap())
return _
return create_langmap(self.langmap())
def __init__(self, typename) -> None:
self.typename = typename

View File

@@ -920,6 +920,18 @@ def createurl(url: str):
return url
def create_langmap(langmap):
_ = dict(
zip(
[_["code"] for _ in static_data["lang_list_all"]],
[_["code"] for _ in static_data["lang_list_all"]],
)
)
_.update({"cht": "zh", "auto": "auto"})
_.update(langmap)
return _
def createenglishlangmap():
mp = dict(
zip(

View File

@@ -1,43 +0,0 @@
import base64
from ocrengines.baseocrclass import baseocr
class OCR(baseocr):
def initocr(self):
self.tokens = {}
self.check()
def check(self):
self.checkempty(["app_id", "app_secret"])
app_id = self.config["app_id"]
app_secret = self.config["app_secret"]
if (app_id, app_secret) not in self.tokens:
res = self.proxysession.post(
"https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal",
headers={"Content-Type": "application/json; charset=utf-8"},
json={"app_id": app_id, "app_secret": app_secret},
)
try:
token = res.json()["tenant_access_token"]
except:
raise Exception(res)
self.tokens[(app_id, app_secret)] = token
return self.tokens[(app_id, app_secret)]
def ocr(self, imagebinary):
token = self.check()
b64 = base64.b64encode(imagebinary)
res = self.proxysession.post(
"https://open.feishu.cn/open-apis/optical_char_recognition/v1/image/basic_recognize",
headers={
"Content-Type": "application/json; charset=utf-8",
"Authorization": "Bearer " + token,
},
json={
"image": str(b64, encoding="utf8"),
},
)
try:
return res.json()["data"]["text_list"]
except:
raise Exception(res)

View File

@@ -69,10 +69,6 @@ class OCR(baseocr):
proxies=self.proxy,
)
try:
# Handle the response
if response.status_code == 200:
return response.json()["candidates"][0]["content"]["parts"][0]["text"]
else:
raise Exception(response)
return response.json()["candidates"][0]["content"]["parts"][0]["text"]
except Exception as e:
raise Exception(response) from e

View File

@@ -1,7 +1,7 @@
from qtsymbols import *
from myutils.config import globalconfig, static_data
from rendertext.somefunctions import dataget
import gobject, functools, importlib, winsharedutils
import gobject, functools, importlib, winsharedutils, uuid
from traceback import print_exc
from rendertext.textbrowser_imp.base import base
from gui.dynalang import LAction
@@ -22,10 +22,8 @@ class Qlabel_c(QLabel):
if self.rect().contains(event.pos()):
try:
if self.pr:
if event.button() == Qt.MouseButton.RightButton:
self.callback(True)
else:
self.callback(False)
if event.button() == Qt.MouseButton.LeftButton:
self.callback()
except:
print_exc()
self.pr = False
@@ -102,18 +100,26 @@ class TextBrowser(QWidget, dataget):
curr = self.textbrowser.textCursor().selectedText()
if not curr:
return
menu = QMenu(self)
menu = QMenu(gobject.baseobject.commonstylebase)
search = LAction(("查词"))
translate = LAction(("翻译"))
tts = LAction(("朗读"))
copy = LAction(("复制"))
menu.addAction(search)
menu.addAction(translate)
menu.addAction(tts)
menu.addSeparator()
menu.addAction(copy)
action = menu.exec(self.mapToGlobal(p))
if action == search:
gobject.baseobject.searchwordW.search_word.emit(curr, False)
gobject.baseobject.searchwordW.search_word.emit(curr)
elif action == copy:
winsharedutils.clipboard_set(curr)
elif action == tts:
gobject.baseobject.read_text(curr)
elif action == translate:
gobject.baseobject.textgetmethod(curr, False)
def __init__(self, parent) -> None:
super().__init__(parent)

View File

@@ -54,12 +54,14 @@ class TextBrowser(QWidget, dataget):
)
)
windows.SetWindowLongPtr(webviewhwnd, windows.GWLP_WNDPROC, self.wndproc)
self.webivewwidget.add_menu(0, _TR("朗读"), gobject.baseobject.read_text)
self.webivewwidget.add_menu(0, _TR("翻译"), gobject.baseobject.textgetmethod)
self.webivewwidget.add_menu(
0,
_TR("查词"),
threader(
lambda w: gobject.baseobject.searchwordW.search_word.emit(
w.replace("\n", "").strip(), False
w.replace("\n", "").strip()
)
),
)
@@ -191,9 +193,9 @@ class TextBrowser(QWidget, dataget):
def calllunaclickedword(self, wordinfo):
clickfunction = wordinfo.get("clickfunction", None)
if clickfunction:
self.saveclickfunction.get(clickfunction)(False)
self.saveclickfunction.get(clickfunction)()
else:
gobject.baseobject.clickwordcallback(wordinfo, False)
gobject.baseobject.clickwordcallback(wordinfo)
# native api end

View File

@@ -1,107 +0,0 @@
from translator.basetranslator import basetrans
import json, requests
from traceback import print_exc
from myutils.utils import createenglishlangmap
class TS(basetrans):
def langmap(self):
return createenglishlangmap()
def __init__(self, typename):
self.context = []
self.access = {}
super().__init__(typename)
def createdata(self, message):
temperature = self.config["Temperature"]
system = self._gptlike_createsys("use_user_prompt", "user_prompt")
data = dict(
system=system,
model=self.config["model"],
messages=message,
# optional
max_tokens=self.config["max_tokens"],
n=1,
# stop=None,
top_p=self.config["top_p"],
temperature=temperature,
frequency_penalty=self.config["frequency_penalty"],
stream=self.config["usingstream"],
)
return data
def commonparseresponse(self, query, response: requests.ResponseBase, usingstream):
if usingstream:
message = ""
for chunk in response.iter_lines():
response_data = chunk.decode("utf-8").strip()
if not response_data:
continue
try:
json_data = json.loads(response_data[6:])
msg = json_data["result"].replace("\n\n", "\n").strip()
message += msg
except:
print_exc()
raise Exception(response_data)
yield msg
else:
try:
message = response.json()["result"].replace("\n\n", "\n").strip()
except:
raise Exception(response)
yield message
self.context.append({"role": "user", "content": query})
self.context.append({"role": "assistant", "content": message})
def get_access_token(self, API_KEY, SECRET_KEY):
url = "https://aip.baidubce.com/oauth/2.0/token"
params = {
"grant_type": "client_credentials",
"client_id": API_KEY,
"client_secret": SECRET_KEY,
}
js = self.proxysession.post(url, params=params).json()
try:
return js["access_token"]
except:
raise Exception(js)
def checkchange(self):
self.checkempty(["model", "SECRET_KEY", "API_KEY"])
SECRET_KEY, API_KEY = (
self.multiapikeycurrent["SECRET_KEY"],
self.multiapikeycurrent["API_KEY"],
)
if not self.access.get((API_KEY, SECRET_KEY)):
acss = self.get_access_token(API_KEY, SECRET_KEY)
self.access[(API_KEY, SECRET_KEY)] = acss
return self.access[(API_KEY, SECRET_KEY)]
def translate(self, query):
acss = self.checkchange()
query = self._gptlike_createquery(
query, "use_user_user_prompt", "user_user_prompt"
)
message = []
self._gpt_common_parse_context(
message, self.context, self.config["context_num"]
)
message.append({"role": "user", "content": query})
usingstream = self.config["usingstream"]
url = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/{}?access_token={}".format(
self.config["model"], acss
)
response = self.proxysession.post(
url,
json=self.createdata(message),
stream=usingstream,
)
return self.commonparseresponse(query, response, usingstream)

View File

@@ -1,48 +0,0 @@
from translator.basetranslator import basetrans
class TS(basetrans):
def langmap(self):
return {"cht": "zh-Hant"}
def inittranslator(self):
self.tokens = {}
self.check()
def check(self):
self.checkempty(["app_id", "app_secret"])
app_id = self.multiapikeycurrent["app_id"]
app_secret = self.multiapikeycurrent["app_secret"]
if (app_id, app_secret) not in self.tokens:
res = self.proxysession.post(
"https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal",
headers={"Content-Type": "application/json; charset=utf-8"},
json={"app_id": app_id, "app_secret": app_secret},
)
try:
token = res.json()["tenant_access_token"]
except:
raise Exception(res)
self.tokens[(app_id, app_secret)] = token
return self.tokens[(app_id, app_secret)]
def translate(self, query):
token = self.check()
res = self.proxysession.post(
"https://open.feishu.cn/open-apis/translation/v1/text/translate",
headers={
"Content-Type": "application/json; charset=utf-8",
"Authorization": "Bearer " + token,
},
json={
"source_language": self.srclang,
"text": query,
"target_language": self.tgtlang,
"glossary": [],
},
)
try:
return res.json()["data"]["text"]
except:
raise Exception(res)

View File

@@ -1,301 +0,0 @@
from translator.basetranslator import basetrans
import ctypes
import os
import glob
import platform
import re
class TS(basetrans):
def inittranslator(self):
self.checkempty(["path"])
path = self.config["path"]
if os.path.exists(path) == False:
raise Exception("OrtMTLib translator path incorrect")
model_path_candidates = glob.glob(
os.path.join(path, "translator_model", "*.onnx"), recursive=True
)
if len(model_path_candidates) > 0:
model_path = model_path_candidates[0]
else:
raise Exception("onnx file not found!")
tok_path_candidates = glob.glob(
os.path.join(path, "tokenizer_model", "*.model")
)
if len(tok_path_candidates) > 0:
tok_path = tok_path_candidates[0]
else:
raise Exception("sentencepiece tokenizer file not found!")
if platform.architecture()[0] == "64bit":
self.setup_splib(os.path.join(path, "bin/x64/splib.dll"), tok_path)
else:
self.setup_splib(os.path.join(path, "bin/x86/splib.dll"), tok_path)
if platform.architecture()[0] == "64bit":
self.setup_ortmtlib(os.path.join(path, "bin/x64/ortmtlib.dll"), model_path)
else:
self.setup_ortmtlib(os.path.join(path, "bin/x86/ortmtlib.dll"), model_path)
self.create_ort_tensors()
def setup_splib(self, sp_dll_path, tok_path):
self.splib = ctypes.CDLL(sp_dll_path)
self.splib.create_sp_tokenizer.argtypes = (ctypes.c_char_p,)
self.splib.create_sp_tokenizer.restype = ctypes.c_int
self.splib.encode_as_ids.argtypes = (
ctypes.c_char_p,
ctypes.POINTER(ctypes.POINTER(ctypes.c_int)),
ctypes.POINTER(ctypes.c_size_t),
)
self.splib.encode_as_ids.restype = ctypes.c_int
self.splib.decode_from_ids.argtypes = (
ctypes.POINTER(ctypes.c_int),
ctypes.c_size_t,
ctypes.POINTER(ctypes.c_char_p),
)
self.splib.decode_from_ids.restype = ctypes.c_int
tok_path_ctypes = ctypes.c_char_p(bytes(tok_path, "utf8"))
res = self.splib.create_sp_tokenizer(tok_path_ctypes)
return
def setup_ortmtlib(self, ort_dll_path, model_path):
self.ort = ctypes.CDLL(
os.path.join(os.path.dirname(ort_dll_path), "onnxruntime.dll")
)
self.ortmtlib = ctypes.CDLL(ort_dll_path)
self.ortmtlib.create_ort_session.restype = ctypes.c_int
self.ortmtlib.create_ort_session.argtypes = (ctypes.c_char_p, ctypes.c_int)
self.ortmtlib.create_tensor_int32.restype = ctypes.c_int
self.ortmtlib.create_tensor_int32.argtypes = (
ctypes.POINTER(ctypes.c_int32),
ctypes.c_size_t,
ctypes.POINTER(ctypes.c_longlong),
ctypes.c_size_t,
ctypes.POINTER(ctypes.c_void_p),
)
self.ortmtlib.create_tensor_float.restype = ctypes.c_int
self.ortmtlib.create_tensor_float.argtypes = (
ctypes.POINTER(ctypes.c_float),
ctypes.c_size_t,
ctypes.POINTER(ctypes.c_longlong),
ctypes.c_size_t,
ctypes.POINTER(ctypes.c_void_p),
)
self.ortmtlib.run_session.argtypes = (
ctypes.POINTER(ctypes.c_void_p),
ctypes.c_char_p,
ctypes.POINTER(ctypes.POINTER(ctypes.c_int)),
ctypes.POINTER(ctypes.c_size_t),
)
self.ortmtlib.run_session.restype = ctypes.c_int
model_path_ctypes = ctypes.c_char_p(bytes(model_path, "utf8"))
n_threads_ctypes = ctypes.c_int(6)
res = self.ortmtlib.create_ort_session(model_path_ctypes, n_threads_ctypes)
return
def create_ort_tensors(self):
max_length = int(self.config["最大生成长度"])
min_length = int(self.config["最小生成长度"])
num_beams = int(self.config["柱搜索数"])
num_return_sequences = int(self.config["序列数"])
length_penalty = float(self.config["过长惩罚"])
repetition_penalty = float(self.config["重复惩罚"])
self.max_length_tensor = ctypes.c_void_p()
self.min_length_tensor = ctypes.c_void_p()
self.num_beams_tensor = ctypes.c_void_p()
self.num_return_sequences_tensor = ctypes.c_void_p()
self.length_penalty_tensor = ctypes.c_void_p()
self.repetition_penalty_tensor = ctypes.c_void_p()
self.shape_one = (ctypes.c_longlong * 1)(1)
self.len_one = ctypes.c_size_t(1)
self.max_length_ctypes = (ctypes.c_int32 * 1)(max_length)
res = self.ortmtlib.create_tensor_int32(
self.max_length_ctypes,
self.len_one,
self.shape_one,
self.len_one,
ctypes.byref(self.max_length_tensor),
)
self.min_length_ctypes = (ctypes.c_int32 * 1)(min_length)
res = self.ortmtlib.create_tensor_int32(
self.min_length_ctypes,
self.len_one,
self.shape_one,
self.len_one,
ctypes.byref(self.min_length_tensor),
)
self.num_beams_ctypes = (ctypes.c_int32 * 1)(num_beams)
res = self.ortmtlib.create_tensor_int32(
self.num_beams_ctypes,
self.len_one,
self.shape_one,
self.len_one,
ctypes.byref(self.num_beams_tensor),
)
self.num_return_sequences_ctypes = (ctypes.c_int32 * 1)(num_return_sequences)
res = self.ortmtlib.create_tensor_int32(
self.num_return_sequences_ctypes,
self.len_one,
self.shape_one,
self.len_one,
ctypes.byref(self.num_return_sequences_tensor),
)
self.length_penalty_ctypes = (ctypes.c_float * 1)(length_penalty)
res = self.ortmtlib.create_tensor_float(
self.length_penalty_ctypes,
self.len_one,
self.shape_one,
self.len_one,
ctypes.byref(self.length_penalty_tensor),
)
self.repetition_penalty_ctypes = (ctypes.c_float * 1)(repetition_penalty)
res = self.ortmtlib.create_tensor_float(
self.repetition_penalty_ctypes,
self.len_one,
self.shape_one,
self.len_one,
ctypes.byref(self.repetition_penalty_tensor),
)
return
def encode_as_ids(self, content):
input_str = ctypes.c_char_p(
"<-{}2{}-> {}".format(self.srclang, self.tgtlang, content).encode("utf8")
)
token_ids = ctypes.POINTER(ctypes.c_int32)()
n_tokens = ctypes.c_size_t()
decoded_str = ctypes.c_char_p()
res = self.splib.encode_as_ids(
input_str, ctypes.byref(token_ids), ctypes.byref(n_tokens)
)
input_ids_len = n_tokens.value
input_ids_py = [token_ids[i] for i in range(input_ids_len)]
input_ids_py += [
1
] # add EOS token to notify the end of sentence and prevent repetition
self.splib.free_ptr(token_ids)
return input_ids_py
def decode_from_ids(self, output_ids_py):
output_len = len(output_ids_py)
decoded_str = ctypes.c_char_p()
output_ids_ctypes = (ctypes.c_int * output_len)(*output_ids_py)
res = self.splib.decode_from_ids(
output_ids_ctypes, output_len, ctypes.byref(decoded_str)
)
decoded_str_py = decoded_str.value.decode("utf8")
self.splib.free_ptr(decoded_str)
return decoded_str_py
def run_session(self, input_ids_py):
input_ids_len = len(input_ids_py)
input_ids_tensor = ctypes.c_void_p()
input_ids_ctypes = (ctypes.c_int32 * input_ids_len)(*input_ids_py)
input_ids_len_ctypes = ctypes.c_size_t(input_ids_len)
input_shape_ctypes = (ctypes.c_longlong * 2)(1, input_ids_len)
input_shape_len_ctypes = ctypes.c_size_t(2)
res = self.ortmtlib.create_tensor_int32(
input_ids_ctypes,
input_ids_len_ctypes,
input_shape_ctypes,
input_shape_len_ctypes,
ctypes.byref(input_ids_tensor),
)
# self.ortmtlib.print_tensor_int32(input_ids_tensor)
input_tensors = [
input_ids_tensor,
self.max_length_tensor,
self.min_length_tensor,
self.num_beams_tensor,
self.num_return_sequences_tensor,
self.length_penalty_tensor,
self.repetition_penalty_tensor,
]
input_tensors_ctypes = (ctypes.c_void_p * len(input_tensors))(*input_tensors)
output_ids = ctypes.POINTER(ctypes.c_int)()
output_len = ctypes.c_size_t()
output_name = ctypes.c_char_p(bytes("sequences", "utf8"))
res = self.ortmtlib.run_session(
input_tensors_ctypes,
output_name,
ctypes.byref(output_ids),
ctypes.byref(output_len),
)
output_ids_py = []
for i in range(output_len.value):
output_ids_py.append(output_ids[i])
self.ortmtlib.release_ort_tensor(input_ids_tensor)
self.ortmtlib.free_ptr(output_ids)
return output_ids_py
def translate(self, content):
delimiters = [
".",
"。",
"\n",
":",
":",
"?",
"?",
"!",
"!",
"",
"",
"",
]
raw_split = [
i.strip() for i in re.split("([" + "".join(delimiters) + "])", content)
]
content_split = [i for i in raw_split if i]
translated_list = []
i = 0
while i < len(content_split):
sentence = content_split[i]
while i + 1 < len(content_split):
if content_split[i + 1] not in delimiters:
break
i += 1
sentence += content_split[i]
input_ids_py = self.encode_as_ids(sentence)
output_ids_py = self.run_session(input_ids_py)
translated_sentence = self.decode_from_ids(output_ids_py)
translated_list.append(translated_sentence)
i += 1
translated = "".join(translated_list)
return translated
def __del__(self):
self.ortmtlib.release_ort_tensor(self.max_length_tensor)
self.ortmtlib.release_ort_tensor(self.min_length_tensor)
self.ortmtlib.release_ort_tensor(self.num_beams_tensor)
self.ortmtlib.release_ort_tensor(self.num_return_sequences_tensor)
self.ortmtlib.release_ort_tensor(self.length_penalty_tensor)
self.ortmtlib.release_ort_tensor(self.repetition_penalty_tensor)
self.ortmtlib.release_all_globals()

View File

@@ -1,152 +0,0 @@
from translator.basetranslator import basetrans
import json
from myutils.utils import createenglishlangmap
from datetime import datetime
import hashlib, sys, hmac, time, json
def sign_tc3(secret_key, date, service, str2sign):
def _hmac_sha256(key, msg):
return hmac.new(key, msg.encode("utf-8"), hashlib.sha256)
def _get_signature_key(key, date, service):
k_date = _hmac_sha256(("TC3" + key).encode("utf-8"), date)
k_service = _hmac_sha256(k_date.digest(), service)
k_signing = _hmac_sha256(k_service.digest(), "tc3_request")
return k_signing.digest()
signing_key = _get_signature_key(secret_key, date, service)
signature = _hmac_sha256(signing_key, str2sign).hexdigest()
return signature
def _get_tc3_signature(
header, method, canonical_uri, payload, secret_key, date, service, options=None
):
options = options or {}
canonical_querystring = ""
if sys.version_info[0] == 3 and isinstance(payload, type("")):
payload = payload.encode("utf8")
payload_hash = hashlib.sha256(payload).hexdigest()
canonical_headers = "content-type:%s\nhost:%s\n" % (
header["Content-Type"],
header["Host"],
)
signed_headers = "content-type;host"
canonical_request = "%s\n%s\n%s\n%s\n%s\n%s" % (
method,
canonical_uri,
canonical_querystring,
canonical_headers,
signed_headers,
payload_hash,
)
algorithm = "TC3-HMAC-SHA256"
credential_scope = date + "/" + service + "/tc3_request"
if sys.version_info[0] == 3:
canonical_request = canonical_request.encode("utf8")
digest = hashlib.sha256(canonical_request).hexdigest()
string2sign = "%s\n%s\n%s\n%s" % (
algorithm,
header["X-TC-Timestamp"],
credential_scope,
digest,
)
return sign_tc3(secret_key, date, service, string2sign)
def _build_req_with_tc3_signature(key, _id, action, params, options=None):
header = {}
header["Content-Type"] = "application/json"
endpoint = "hunyuan.tencentcloudapi.com"
timestamp = int(time.time())
header["Host"] = endpoint
header["X-TC-Action"] = action[0].upper() + action[1:]
header["X-TC-RequestClient"] = "SDK_PYTHON_3.0.1193"
header["X-TC-Timestamp"] = str(timestamp)
header["X-TC-Version"] = "2023-09-01"
data = json.dumps(params).encode("utf8")
service = "hunyuan"
date = datetime.utcfromtimestamp(timestamp).strftime("%Y-%m-%d")
signature = _get_tc3_signature(
header, "POST", "/", data, key, date, service, options
)
auth = (
"TC3-HMAC-SHA256 Credential=%s/%s/%s/tc3_request, SignedHeaders=content-type;host, Signature=%s"
% (_id, date, service, signature)
)
header["Authorization"] = auth
return header
class TS(basetrans):
def langmap(self):
return createenglishlangmap()
def __init__(self, typename):
self.context = []
super().__init__(typename)
def translate(self, query):
self.checkempty(["secret_id", "secret_key"])
query = self._gptlike_createquery(
query, "use_user_user_prompt", "user_user_prompt"
)
sysprompt = self._gptlike_createsys("use_user_prompt", "user_prompt")
message = [{"Role": "system", "Content": sysprompt}]
self._gpt_common_parse_context(message, self.context, self.config["context_num"])
message.append({"Role": "user", "Content": query})
usingstream = self.config["usingstream"]
json_data = {
"Model": self.config["model"],
"Messages": message,
"Stream": usingstream,
"TopP": self.config["top_p"],
"Temperature": self.config["Temperature"],
}
headers = _build_req_with_tc3_signature(
self.multiapikeycurrent["secret_key"],
self.multiapikeycurrent["secret_id"],
"ChatCompletions",
json_data,
)
response = self.proxysession.post(
"https://hunyuan.tencentcloudapi.com/",
headers=headers,
data=json.dumps(json_data),
stream=usingstream,
)
if usingstream:
message = ""
for i in response.iter_lines():
if not i:
continue
i = i.decode("utf8")[6:]
try:
mes = json.loads(i)["Choices"][0]["Delta"]["Content"]
except:
raise Exception(i)
yield mes
message += mes
else:
try:
message = response.json()["Response"]["Choices"][0]["Message"][
"Content"
]
except:
raise Exception(response)
yield message
self.context.append({"Role": "user", "Content": query})
self.context.append({"Role": "assistant", "Content": message})

View File

@@ -1396,6 +1396,150 @@
"use": false,
"name": "goo"
},
"gemini": {
"use": false,
"name": "Gemini",
"args": {
"SECRET_KEY": "",
"Temperature": 0.3,
"BASE_URL": "https://generativelanguage.googleapis.com",
"model": "gemini-1.5-flash",
"modellistcache": [],
"use_custom_prompt": false,
"custom_prompt": "",
"user_user_prompt": "{sentence}",
"use_user_user_prompt": false,
"s": "",
"s2": ""
},
"argstype": {
"BASE_URL": {
"name": "API接口地址",
"rank": 0
},
"user_user_prompt": {
"name": "自定义_user message",
"refswitch": "use_user_user_prompt",
"rank": 5.1
},
"s": {
"type": "split",
"rank": 3.5
},
"s2": {
"type": "split",
"rank": 5.12
},
"custom_prompt": {
"name": "自定义_system prompt",
"type": "multiline",
"refswitch": "use_custom_prompt",
"rank": 5
},
"SECRET_KEY": {
"rank": 2,
"name": "API Key",
"type": "textlist"
},
"model": {
"rank": 3,
"type": "lineedit_or_combo",
"list_function": "list_models",
"list_cache": "modellistcache"
},
"modellistcache": {
"type": "list_cache"
},
"Temperature": {
"type": "spin",
"min": 0,
"max": 1,
"step": 0.1
}
}
},
"chatgptlike": {
"use": false,
"name": "ChatGPT_兼容接口",
"args": {
"model": "gpt-4o-mini",
"modellistcache": [],
"API接口地址": "https://api.openai.com",
"SECRET_KEY": "",
"使用自定义promt": false,
"自定义promt": "",
"Temperature": 0.3,
"top_p": 0.3,
"max_tokens": 1024,
"frequency_penalty": 0,
"user_user_prompt": "{sentence}",
"use_user_user_prompt": false,
"s": "",
"s2": ""
},
"argstype": {
"user_user_prompt": {
"name": "自定义_user message",
"refswitch": "use_user_user_prompt",
"rank": 5.1
},
"s": {
"type": "split",
"rank": 2.5
},
"s2": {
"type": "split",
"rank": 5.12
},
"API接口地址": {
"rank": 0
},
"SECRET_KEY": {
"rank": 1,
"name": "API Key",
"type": "textlist"
},
"model": {
"rank": 2,
"type": "lineedit_or_combo",
"list_function": "list_models",
"list_cache": "modellistcache"
},
"modellistcache": {
"type": "list_cache"
},
"top_p": {
"type": "spin",
"min": 0,
"max": 1,
"step": 0.01
},
"frequency_penalty": {
"type": "spin",
"min": 0,
"max": 2,
"step": 0.05
},
"max_tokens": {
"type": "intspin",
"min": 1,
"max": 1000000,
"step": 1
},
"自定义promt": {
"type": "multiline",
"refswitch": "使用自定义promt",
"name": "自定义_system prompt",
"rank": 5
},
"Temperature": {
"type": "spin",
"min": 0,
"max": 1,
"step": 0.1
}
}
},
"mdict": {
"use": false,
"name": "MDict",
@@ -1495,10 +1639,6 @@
"use": false,
"name": "腾讯"
},
"feishu": {
"use": false,
"name": "飞书"
},
"ocrspace": {
"use": false,
"name": "ocrspace"
@@ -1841,12 +1981,6 @@
"color": "blue",
"name": "火山"
},
"feishu": {
"type": "api",
"use": false,
"color": "blue",
"name": "飞书"
},
"papago": {
"use": false,
"color": "blue",
@@ -1937,20 +2071,6 @@
"type": "pre",
"color": "blue",
"name": "实时编辑"
},
"baiduqianfan": {
"type": "api",
"use": false,
"color": "blue",
"name": "百度千帆大模型",
"is_gpt_like": true
},
"txhunyuan": {
"type": "api",
"use": false,
"color": "blue",
"name": "腾讯混元大模型",
"is_gpt_like": true
}
},
"ZoomFactor": 1,

View File

@@ -115,12 +115,6 @@
}
}
},
"feishu": {
"args": {
"app_id": "",
"app_secret": ""
}
},
"docsumo": {
"args": {
"token": ""

View File

@@ -90,20 +90,6 @@
}
}
},
"feishu": {
"args": {
"app_id": "",
"app_secret": ""
},
"argstype": {
"app_id": {
"type": "textlist"
},
"app_secret": {
"type": "textlist"
}
}
},
"caiyunapi": {
"args": {
"Token": ""
@@ -277,95 +263,6 @@
}
}
},
"txhunyuan": {
"args": {
"secret_id": "",
"secret_key": "",
"Temperature": 0.3,
"top_p": 0.3,
"model": "hunyuan-lite",
"context_num": 0,
"use_user_prompt": false,
"user_prompt": "",
"user_user_prompt": "{sentence}",
"use_user_user_prompt": false,
"usingstream": true,
"other_args": "{}",
"use_other_args": false,
"s": ""
},
"argstype": {
"other_args": {
"type": "multiline",
"refswitch": "use_other_args",
"name": "其他参数"
},
"s": {
"type": "split",
"rank": 2.5
},
"secret_id": {
"rank": 0,
"name": "SecretId",
"type": "textlist"
},
"secret_key": {
"rank": 1,
"name": "SecretKey",
"type": "textlist"
},
"model": {
"rank": 2,
"type": "lineedit_or_combo",
"list": [
"hunyuan-lite",
"hunyuan-turbo",
"hunyuan-pro",
"hunyuan-standard",
"hunyuan-standard-256k",
"hunyuan-role",
"hunyuan-functioncall",
"hunyuan-code"
]
},
"top_p": {
"type": "spin",
"min": 0,
"max": 1,
"step": 0.01
},
"usingstream": {
"name": "流式输出",
"type": "switch",
"rank": 3
},
"user_prompt": {
"name": "自定义_system prompt",
"type": "multiline",
"refswitch": "use_user_prompt",
"rank": 5
},
"user_user_prompt": {
"name": "自定义_user message",
"refswitch": "use_user_user_prompt",
"rank": 5.1
},
"context_num": {
"name": "附带上下文个数",
"type": "intspin",
"min": 0,
"max": 99999,
"step": 1,
"rank": 4.9
},
"Temperature": {
"type": "spin",
"min": 0,
"max": 1,
"step": 0.1
}
}
},
"claude": {
"args": {
"BASE_URL": "https://api.anthropic.com",
@@ -379,8 +276,6 @@
"流式输出": true,
"user_user_prompt": "{sentence}",
"use_user_user_prompt": false,
"other_args": "{}",
"use_other_args": false,
"s": "",
"s2": "",
"prefill": "",
@@ -394,11 +289,6 @@
"prefill_use": {
"type": "switch"
},
"other_args": {
"type": "multiline",
"refswitch": "use_other_args",
"name": "其他参数"
},
"user_user_prompt": {
"name": "自定义_user message",
"refswitch": "use_user_user_prompt",
@@ -577,110 +467,6 @@
}
}
},
"baiduqianfan": {
"args": {
"model": "ernie-4.0-8k",
"context_num": 0,
"API_KEY": "",
"SECRET_KEY": "",
"use_user_prompt": false,
"user_prompt": "",
"usingstream": true,
"Temperature": 0.3,
"top_p": 0.3,
"max_tokens": 1024,
"frequency_penalty": 0,
"user_user_prompt": "{sentence}",
"use_user_user_prompt": false,
"other_args": "{}",
"use_other_args": false,
"s": ""
},
"argstype": {
"other_args": {
"type": "multiline",
"refswitch": "use_other_args",
"name": "其他参数"
},
"user_user_prompt": {
"name": "自定义_user message",
"refswitch": "use_user_user_prompt",
"rank": 5.1
},
"s": {
"type": "split",
"rank": 2.5
},
"API_KEY": {
"rank": 0,
"name": "API Key",
"type": "textlist"
},
"SECRET_KEY": {
"rank": 1,
"name": "Secret Key",
"type": "textlist"
},
"model": {
"rank": 2,
"type": "lineedit_or_combo",
"list": [
"ernie-4.0-8k",
"ernie-4.0-turbo-8k",
"ernie-3.5-128k",
"ernie-3.5-8k",
"ernie-speed-pro-128k",
"ernie-speed-128k",
"ernie-speed-8k",
"ernie-lite-8k",
"ernie-tiny-8k"
]
},
"top_p": {
"type": "spin",
"min": 0,
"max": 1,
"step": 0.01
},
"frequency_penalty": {
"type": "spin",
"min": 0,
"max": 2,
"step": 0.05
},
"max_tokens": {
"type": "intspin",
"min": 1,
"max": 1000000,
"step": 1
},
"user_prompt": {
"type": "multiline",
"name": "自定义_system prompt",
"refswitch": "use_user_prompt",
"rank": 5
},
"usingstream": {
"name": "流式输出",
"type": "switch",
"rank": 3
},
"context_num": {
"name": "附带上下文个数",
"type": "intspin",
"min": 0,
"max": 99999,
"step": 1,
"rank": 4.9
},
"Temperature": {
"type": "spin",
"min": 0,
"max": 1,
"step": 0.1
}
}
},
"cohere": {
"args": {
"SECRET_KEY": "",
@@ -693,8 +479,6 @@
"流式输出": true,
"use_user_user_prompt": false,
"user_user_prompt": "{sentence}",
"other_args": "{}",
"use_other_args": false,
"s": "",
"s2": "",
"prefill": "",
@@ -712,11 +496,6 @@
"type": "split",
"rank": 5.12
},
"other_args": {
"type": "multiline",
"refswitch": "use_other_args",
"name": "其他参数"
},
"user_user_prompt": {
"name": "自定义_user message",
"refswitch": "use_user_user_prompt",
@@ -979,8 +758,6 @@
"custom_prompt": "",
"user_user_prompt": "{sentence}",
"use_user_user_prompt": false,
"other_args": "{}",
"use_other_args": false,
"s": "",
"s2": "",
"usingstream": true,
@@ -995,11 +772,6 @@
"prefill_use": {
"type": "switch"
},
"other_args": {
"type": "multiline",
"refswitch": "use_other_args",
"name": "其他参数"
},
"BASE_URL": {
"name": "API接口地址",
"rank": 0
@@ -1058,60 +830,6 @@
}
}
},
"ort_sp": {
"args": {
"path": "",
"最大生成长度": 128,
"最小生成长度": 1,
"柱搜索数": 8,
"序列数": 1,
"过长惩罚": 1.4,
"重复惩罚": 1.7
},
"argstype": {
"path": {
"name": "路径",
"type": "file",
"dir": true
},
"序列数": {
"type": "intspin",
"min": 0,
"max": 10000,
"step": 1
},
"最大生成长度": {
"type": "intspin",
"min": 0,
"max": 10000,
"step": 1
},
"最小生成长度": {
"type": "intspin",
"min": 0,
"max": 10000,
"step": 1
},
"柱搜索数": {
"type": "intspin",
"min": 0,
"max": 10000,
"step": 1
},
"过长惩罚": {
"type": "spin",
"min": 0,
"max": 10000,
"step": 0.1
},
"重复惩罚": {
"type": "spin",
"min": 0,
"max": 10000,
"step": 0.1
}
}
},
"sakura": {
"args": {
"API接口地址": "http://127.0.0.1:8080/",

View File

@@ -399,12 +399,6 @@
"使用翻译缓存": "استخدام ذاكرة التخزين المؤقت الترجمة",
"新": "جديد .",
"缩放系数": "معامل التكبير",
"最大生成长度": "أقصى طول الجيل",
"最小生成长度": "الحد الأدنى من طول الجيل",
"柱搜索数": "العمود رقم البحث",
"序列数": "رقم التسلسل",
"过长惩罚": "عقوبة طويلة",
"重复惩罚": "تكرار العقوبة",
"显示日语注音": "عرض صوتي ياباني",
"注音颜色": "لون صوتي",
"平假名": "هيراغانا",

View File

@@ -399,12 +399,6 @@
"使用翻译缓存": "使用翻譯快取",
"新": "新",
"缩放系数": "縮放係數",
"最大生成长度": "最大生成長度",
"最小生成长度": "最小生成長度",
"柱搜索数": "柱搜尋數",
"序列数": "序列數",
"过长惩罚": "過長懲罰",
"重复惩罚": "重複懲罰",
"显示日语注音": "顯示日語注音",
"注音颜色": "注音顏色",
"平假名": "平假名",

View File

@@ -405,12 +405,6 @@
"使用翻译缓存": "Použít mezipaměť pro překlad",
"新": "nový",
"缩放系数": "Skalační faktor",
"最大生成长度": "Maximální generovaná délka",
"最小生成长度": "Minimální generovaná délka",
"柱搜索数": "Počet vyhledávání sloupců",
"序列数": "Počet sekvencí",
"过长惩罚": "Nadměrný trest",
"重复惩罚": "Opakovaný trest",
"语言包": "Jazykový balíček",
"显示日语注音": "Zobrazit japonské fonetické anotace",
"注音颜色": "Fonetická barva",

View File

@@ -405,12 +405,6 @@
"使用翻译缓存": "Übersetzungscache verwenden",
"新": "neu",
"缩放系数": "Skalierungsfaktor",
"最大生成长度": "Maximale erzeugte Länge",
"最小生成长度": "Minimale generierte Länge",
"柱搜索数": "Anzahl der Spaltensuche",
"序列数": "Anzahl der Sequenzen",
"过长惩罚": "Übermäßige Bestrafung",
"重复惩罚": "Wiederholte Strafe",
"语言包": "Sprachpaket",
"显示日语注音": "Japanische phonetische Anmerkungen anzeigen",
"注音颜色": "Phonetische Farbe",

View File

@@ -398,12 +398,6 @@
"使用翻译缓存": "Use Translation Cache",
"新": "New",
"缩放系数": "Scale Factor",
"最大生成长度": "Max Generation Length",
"最小生成长度": "Min Generation Length",
"柱搜索数": "Beam Search Number",
"序列数": "Sequence Number",
"过长惩罚": "Length Penalty",
"重复惩罚": "Repetition Penalty",
"显示日语注音": "Show Furigana",
"注音颜色": "Furigana Color",
"平假名": "ひらがな (Hiragana)",

View File

@@ -399,12 +399,6 @@
"使用翻译缓存": "Usar caché de traducción",
"新": "Nuevo",
"缩放系数": "Coeficiente de escala",
"最大生成长度": "Longitud máxima generada",
"最小生成长度": "Longitud mínima de generación",
"柱搜索数": "Número de búsquedas de columnas",
"序列数": "Número de secuencias",
"过长惩罚": "Castigo excesivo",
"重复惩罚": "Repetir el castigo",
"显示日语注音": "Muestra la fonética japonesa",
"注音颜色": "Color de la nota",
"平假名": "Hirayama",

View File

@@ -399,12 +399,6 @@
"使用翻译缓存": "Utiliser le cache de traduction",
"新": "Nouveau",
"缩放系数": "Facteur de zoom",
"最大生成长度": "Longueur maximale de génération",
"最小生成长度": "Longueur minimale de génération",
"柱搜索数": "Nombre de recherches de colonnes",
"序列数": "Nombre de séquences",
"过长惩罚": "Pénalité trop longue",
"重复惩罚": "Punition répétée",
"显示日语注音": "Afficher les notes en japonais",
"注音颜色": "Couleur d'accent",
"平假名": "Hiragana",

View File

@@ -399,12 +399,6 @@
"使用翻译缓存": "Uso della cache delle traduzioni",
"新": "nuovo",
"缩放系数": "Fattore di scala",
"最大生成长度": "Lunghezza massima di generazione",
"最小生成长度": "Lunghezza minima di generazione",
"柱搜索数": "Numero di ricerche in colonna",
"序列数": "Numero di sequenze",
"过长惩罚": "Pena eccessiva",
"重复惩罚": "Pena ripetitiva",
"显示日语注音": "Mostra pinyin giapponese",
"注音颜色": "Colore pinyin",
"平假名": "Hiragana",

View File

@@ -399,12 +399,6 @@
"使用翻译缓存": "翻訳キャッシュの使用",
"新": "新規",
"缩放系数": "スケーリング係数",
"最大生成长度": "最大生成長さ",
"最小生成长度": "最小生成長さ",
"柱搜索数": "カラムサーチ",
"序列数": "シーケンス数",
"过长惩罚": "長すぎる罰",
"重复惩罚": "繰り返し罰する.",
"显示日语注音": "日本語のルビを表示",
"注音颜色": "ルビの色",
"平假名": "ひらがな",

View File

@@ -399,12 +399,6 @@
"使用翻译缓存": "번역 캐시 사용",
"新": "새",
"缩放系数": "배율 계수",
"最大生成长度": "최대 생성 길이",
"最小生成长度": "최소 생성 길이",
"柱搜索数": "원통 검색 수",
"序列数": "시퀀스 수",
"过长惩罚": "과도한 처벌",
"重复惩罚": "반복 처벌",
"显示日语注音": "일본어 메모 표시",
"注音颜色": "주음 색상",
"平假名": "히라가나",

View File

@@ -405,12 +405,6 @@
"使用翻译缓存": "Vertaalcache gebruiken",
"新": "nieuw",
"缩放系数": "Schaalfactor",
"最大生成长度": "Maximale gegenereerde lengte",
"最小生成长度": "Minimale gegenereerde lengte",
"柱搜索数": "Aantal kolommen zoeken",
"序列数": "Aantal sequenties",
"过长惩罚": "Overmatige straf",
"重复惩罚": "Herhaalde straf",
"语言包": "Taalpakket",
"显示日语注音": "Japanse fonetische annotaties tonen",
"注音颜色": "Fonetische kleur",

View File

@@ -399,12 +399,6 @@
"使用翻译缓存": "Korzystanie z pamięci podręcznej tłumaczeń",
"新": "nowy",
"缩放系数": "Współczynnik skalowania",
"最大生成长度": "Maksymalna długość generacji",
"最小生成长度": "Minimalna długość generacji",
"柱搜索数": "Liczba wyszukiwania kolumn",
"序列数": "Liczba sekwencji",
"过长惩罚": "Nadmierna kara",
"重复惩罚": "Kary powtarzające się",
"显示日语注音": "Wyświetl japoński pinyin",
"注音颜色": "Kolor pinyin",
"平假名": "HiraganaName",

View File

@@ -405,12 +405,6 @@
"使用翻译缓存": "Usar a 'cache' de tradução",
"新": "novo",
"缩放系数": "Fator de escala",
"最大生成长度": "Comprimento máximo gerado",
"最小生成长度": "Comprimento mínimo gerado",
"柱搜索数": "Contagem de pesquisas em colunas",
"序列数": "Número de sequências",
"过长惩罚": "Pena excessiva",
"重复惩罚": "Repetição da punição",
"语言包": "Pacote de Idiomas",
"显示日语注音": "Mostrar as anotações fonéticas japonesas",
"注音颜色": "Cor fonética",

View File

@@ -399,12 +399,6 @@
"使用翻译缓存": "Использовать кэш перевода",
"新": "Новый",
"缩放系数": "Коэффициент масштабирования",
"最大生成长度": "Максимальная длина генерации",
"最小生成长度": "Минимальная длина генерации",
"柱搜索数": "Количество поисковых столбцов",
"序列数": "Количество последовательностей",
"过长惩罚": "Слишком длительное наказание",
"重复惩罚": "Повторное наказание",
"显示日语注音": "Показать японское произношение",
"注音颜色": "Цвет звука",
"平假名": "Псевдоним",

View File

@@ -405,12 +405,6 @@
"使用翻译缓存": "Använd översättningskamera",
"新": "ny",
"缩放系数": "Skalningsfaktor",
"最大生成长度": "Maximal genererad längd",
"最小生成长度": "Minsta genererad längd",
"柱搜索数": "Antal kolumnsökningar",
"序列数": "Antal sekvenser",
"过长惩罚": "Överdrivet straff",
"重复惩罚": "Upprepat straff",
"语言包": "Språkpaket",
"显示日语注音": "Visa japanska fonetiska anteckningar",
"注音颜色": "Fonetisk färg",

View File

@@ -399,12 +399,6 @@
"使用翻译缓存": "ใช้แคชการแปล",
"新": "ใหม่",
"缩放系数": "ค่าสัมประสิทธิ์การซูม",
"最大生成长度": "ความยาวสูงสุด",
"最小生成长度": "ความยาวการสร้างขั้นต่ำ",
"柱搜索数": "จำนวนการค้นหาคอลัมน์",
"序列数": "จำนวนลำดับ",
"过长惩罚": "การลงโทษที่ยาวนานเกินไป",
"重复惩罚": "การลงโทษซ้ำ",
"显示日语注音": "แสดงหมายเหตุภาษาญี่ปุ่น",
"注音颜色": "สี Injection",
"平假名": "ฮิรางานะ",

View File

@@ -399,12 +399,6 @@
"使用翻译缓存": "Çeviri Cache kullanılıyor",
"新": "Yeni",
"缩放系数": "Scale factor",
"最大生成长度": "En yüksek nesil uzunluğu",
"最小生成长度": "En az nesil uzunluğu",
"柱搜索数": "Sütun aramalarının sayısı",
"序列数": "Sezenler sayısı",
"过长惩罚": "Çok fazla ceza",
"重复惩罚": "Tekrar cezalandırıcı",
"显示日语注音": "Display Japanese Pinyin",
"注音颜色": "Pinyin rengi",
"平假名": "HiraganaKCharselect unicode block name",

View File

@@ -399,12 +399,6 @@
"使用翻译缓存": "Використання кешу перекладу",
"新": "новий",
"缩放系数": "Фактор масштабу",
"最大生成长度": "Максимальна довжина створення",
"最小生成长度": "Мінімальна довжина створення",
"柱搜索数": "Кількість пошуків стовпчиків",
"序列数": "Кількість послідовностей",
"过长惩罚": "Великий покарання",
"重复惩罚": "Повторює покарання",
"显示日语注音": "Показувати японський пінін",
"注音颜色": "Колір пініна",
"平假名": іраганаworld. kgm",

View File

@@ -399,12 +399,6 @@
"使用翻译缓存": "Sử dụng Translation Cache",
"新": "Mới",
"缩放系数": "Hệ số thu phóng",
"最大生成长度": "Chiều dài tạo tối đa",
"最小生成长度": "Chiều dài tạo tối thiểu",
"柱搜索数": "Số lượng tìm kiếm cột",
"序列数": "Số dãy",
"过长惩罚": "Hình phạt quá dài",
"重复惩罚": "Hình phạt lặp lại",
"显示日语注音": "Hiện chú thích tiếng Nhật",
"注音颜色": "Màu chú thích",
"平假名": "Name",

View File

@@ -414,12 +414,6 @@
"使用翻译缓存": "",
"新": "",
"缩放系数": "",
"最大生成长度": "",
"最小生成长度": "",
"柱搜索数": "",
"序列数": "",
"过长惩罚": "",
"重复惩罚": "",
"语言包": "",
"显示日语注音": "",
"注音颜色": "",