Mirror of https://github.com/HIllya51/LunaTranslator.git, synced 2024-12-29 00:24:13 +08:00
fix
parent
8787248ae1
commit
ab1d8b8338
@@ -64,6 +64,14 @@ class TS(basetrans):
                 message.append(self.context[i * 2])
                 message.append(self.context[i * 2 + 1])
 
+    def _gpt_common_parse_context_2(self, messages, context, contextnum):
+        msgs = []
+        self._gpt_common_parse_context(msgs, context, contextnum)
+        __ja, __zh = [], []
+        for i, _ in enumerate(msgs):
+            [__ja, __zh][i % 2 == 0].append(_.strip())
+        messages.append({"role": "assistant", "content": "\n".join(__zh)})
+
     def make_messages(self, query, gpt_dict=None):
         contextnum = (
             self.config["append_context_num"] if self.config["use_context"] else 0
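For readers skimming the hunk above: the new _gpt_common_parse_context_2 first runs the existing pair-wise parser into a scratch list, then splits that list by position with a bool-as-index trick and folds one of the two buckets into a single assistant turn. A standalone sketch of the same mechanics follows; fold_context and pairwise_context are illustrative names, and the stub's loop bound is an assumption, since only two lines of _gpt_common_parse_context are visible in this diff.

# Illustrative restatement of _gpt_common_parse_context_2; all names are made up.
def pairwise_context(context, contextnum):
    # stand-in for _gpt_common_parse_context: emit stored entries pair by pair
    # (the real loop bound is not visible in this hunk)
    msgs = []
    for i in range(min(len(context) // 2, contextnum)):
        msgs.append(context[i * 2])
        msgs.append(context[i * 2 + 1])
    return msgs

def fold_context(context, contextnum):
    msgs = pairwise_context(context, contextnum)
    bucket_a, bucket_b = [], []
    for i, line in enumerate(msgs):
        # (i % 2 == 0) is a bool, and a bool indexes a list as 0/1: entries at
        # even positions land in bucket_b, entries at odd positions in bucket_a
        [bucket_a, bucket_b][i % 2 == 0].append(line.strip())
    # only the second bucket (named __zh in the commit) becomes the assistant turn
    return {"role": "assistant", "content": "\n".join(bucket_b)}

print(fold_context(["q1", "t1", "q2", "t2"], 2))
# {'role': 'assistant', 'content': 'q1\nq2'}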
@@ -75,7 +83,7 @@ class TS(basetrans):
                 "content": "你是一个轻小说翻译模型,可以流畅通顺地以日本轻小说的风格将日文翻译成简体中文,并联系上下文正确使用人称代词,不擅自添加原文中没有的代词。",
             }
         ]
-        self._gpt_common_parse_context(messages, self.context, contextnum)
+        self._gpt_common_parse_context_2(messages, self.context, contextnum)
         messages.append(
             {"role": "user", "content": f"将下面的日文文本翻译成中文:{query}"}
         )
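With the helper in place, the no-glossary branch of make_messages shown above now yields a three-part message list: one system prompt, one assistant turn carrying the folded context, and one user turn with the query; previously the stored context entries were appended as individual messages. A hedged sketch of the resulting shape, with placeholder strings only:

# Approximate shape of "messages" after the hunk above; placeholder strings only.
query = "これはテストです"
messages = [
    {"role": "system", "content": "<system prompt from the diff>"},
    # _gpt_common_parse_context_2 contributes a single assistant turn holding
    # the selected context lines joined by newlines
    {"role": "assistant", "content": "<folded context lines>"},
    {"role": "user", "content": f"将下面的日文文本翻译成中文:{query}"},
]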
@@ -86,7 +94,7 @@ class TS(basetrans):
                 "content": "你是一个轻小说翻译模型,可以流畅通顺地使用给定的术语表以日本轻小说的风格将日文翻译成简体中文,并联系上下文正确使用人称代词,注意不要混淆使役态和被动态的主语和宾语,不要擅自添加原文中没有的代词,也不要擅自增加或减少换行。",
             }
         ]
-        self._gpt_common_parse_context(messages, self.context, contextnum)
+        self._gpt_common_parse_context_2(messages, self.context, contextnum)
         gpt_dict_raw_text = self.make_gpt_dict_text(gpt_dict)
         content = (
             "根据以下术语表(可以为空):\n"
@@ -103,7 +111,7 @@ class TS(basetrans):
                 "content": "你是一个轻小说翻译模型,可以流畅通顺地以日本轻小说的风格将日文翻译成简体中文,并联系上下文正确使用人称代词,不擅自添加原文中没有的代词。",
             }
         ]
-        self._gpt_common_parse_context(messages, self.context, contextnum)
+        self._gpt_common_parse_context_2(messages, self.context, contextnum)
         gpt_dict_raw_text = self.make_gpt_dict_text(gpt_dict)
         if gpt_dict_raw_text:
             content = (
@@ -215,9 +223,9 @@ class TS(basetrans):
         self.get_client(self.config["API接口地址"])
         frequency_penalty = float(self.config["frequency_penalty"])
 
-        messages = self.make_messages(query)
+        messages = self.make_messages(query, gpt_dict=gpt_dict)
         if bool(self.config["流式输出"]) == True:
-            output = self.send_request_stream(messages, gpt_dict=gpt_dict)
+            output = self.send_request_stream(messages)
             completion_tokens = 0
             output_text = ""
             for o in output:
@@ -229,7 +237,7 @@ class TS(basetrans):
                 else:
                     finish_reason = o["choices"][0]["finish_reason"]
         else:
-            output = self.send_request(messages, gpt_dict=gpt_dict)
+            output = self.send_request(messages)
             for o in output:
                 completion_tokens = o["usage"]["completion_tokens"]
                 output_text = o["choices"][0]["message"]["content"]
@@ -245,9 +253,7 @@ class TS(basetrans):
             # print("------------------清零------------------")
             if bool(self.config["流式输出"]) == True:
                 output = self.send_request_stream(
-                    messages,
-                    frequency_penalty=frequency_penalty,
-                    gpt_dict=gpt_dict,
+                    messages, frequency_penalty=frequency_penalty
                 )
                 completion_tokens = 0
                 output_text = ""
@@ -262,9 +268,7 @@ class TS(basetrans):
                         finish_reason = o["choices"][0]["finish_reason"]
             else:
                 output = self.send_request(
-                    messages,
-                    frequency_penalty=frequency_penalty,
-                    gpt_dict=gpt_dict,
+                    messages, frequency_penalty=frequency_penalty
                 )
                 yield "\0"
                 for o in output:
@@ -279,6 +283,5 @@ class TS(basetrans):
                         + output_text
                     )
                 break
-
-        self.context.append(messages[-1])
-        self.context.append({"role": "assistant", "content": output_text})
+        self.context.append(query)
+        self.context.append(output_text)
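The last hunk of the Python file pairs with the new helper: self.context no longer stores whole message dicts but bare strings, appended as (source, translation) pairs. A minimal sketch of that bookkeeping and of the indexing _gpt_common_parse_context applies to it, with illustrative values only:

# After the change, self.context is a flat list of strings: even indices hold
# the source text, odd indices the translation produced for it.
context = []
query, output_text = "今日はいい天気ですね", "今天天气真好呢"
context.append(query)        # was: self.context.append(messages[-1])
context.append(output_text)  # was: self.context.append({"role": "assistant", "content": output_text})

# exactly the layout _gpt_common_parse_context walks with i * 2 / i * 2 + 1
for i in range(len(context) // 2):
    src, dst = context[i * 2], context[i * 2 + 1]
    print(src, "->", dst)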
@@ -29,7 +29,7 @@ include(generate_product_version)
 
 set(VERSION_MAJOR 5)
 set(VERSION_MINOR 42)
-set(VERSION_PATCH 9)
+set(VERSION_PATCH 10)
 
 add_library(pch pch.cpp)
 target_precompile_headers(pch PUBLIC pch.h)