Be more specific for error messages (#1409)

### What problem does this PR solve?

#918 

### Type of change

- [x] Refactoring
Author: KevinHuSh
Date: 2024-07-08 09:32:44 +08:00 (committed by GitHub)
Commit: b3ebc66b13 (parent: dcb3fb2073)

9 changed files with 126 additions and 61 deletions


```diff
@@ -33,31 +33,31 @@ class GenerateParam(ComponentParamBase):
         super().__init__()
         self.llm_id = ""
         self.prompt = ""
-        self.max_tokens = 256
-        self.temperature = 0.1
-        self.top_p = 0.3
-        self.presence_penalty = 0.4
-        self.frequency_penalty = 0.7
+        self.max_tokens = 0
+        self.temperature = 0
+        self.top_p = 0
+        self.presence_penalty = 0
+        self.frequency_penalty = 0
         self.cite = True
-        #self.parameters = []
+        self.parameters = []

     def check(self):
-        self.check_decimal_float(self.temperature, "Temperature")
-        self.check_decimal_float(self.presence_penalty, "Presence penalty")
-        self.check_decimal_float(self.frequency_penalty, "Frequency penalty")
-        self.check_positive_number(self.max_tokens, "Max tokens")
-        self.check_decimal_float(self.top_p, "Top P")
-        self.check_empty(self.llm_id, "LLM")
-        #self.check_defined_type(self.parameters, "Parameters", ["list"])
+        self.check_decimal_float(self.temperature, "[Generate] Temperature")
+        self.check_decimal_float(self.presence_penalty, "[Generate] Presence penalty")
+        self.check_decimal_float(self.frequency_penalty, "[Generate] Frequency penalty")
+        self.check_nonnegative_number(self.max_tokens, "[Generate] Max tokens")
+        self.check_decimal_float(self.top_p, "[Generate] Top P")
+        self.check_empty(self.llm_id, "[Generate] LLM")
+        # self.check_defined_type(self.parameters, "Parameters", ["list"])

     def gen_conf(self):
-        return {
-            "max_tokens": self.max_tokens,
-            "temperature": self.temperature,
-            "top_p": self.top_p,
-            "presence_penalty": self.presence_penalty,
-            "frequency_penalty": self.frequency_penalty,
-        }
+        conf = {}
+        if self.max_tokens > 0: conf["max_tokens"] = self.max_tokens
+        if self.temperature > 0: conf["temperature"] = self.temperature
+        if self.top_p > 0: conf["top_p"] = self.top_p
+        if self.presence_penalty > 0: conf["presence_penalty"] = self.presence_penalty
+        if self.frequency_penalty > 0: conf["frequency_penalty"] = self.frequency_penalty
+        return conf


 class Generate(ComponentBase):
```
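Two things change in this hunk. The `[Generate] ` prefix tags every validation message with the component it came from, so a failing canvas points at the offending node instead of printing a bare "Temperature". And with all sampling knobs now defaulting to 0, `gen_conf()` forwards only the values the user explicitly set; everything left at 0 falls through to the LLM provider's own defaults. A minimal sketch of both behaviors, with a hypothetical `check_nonnegative_number` standing in for the `ComponentParamBase` validator:

```python
class GenerateParamSketch:
    """Minimal stand-in for GenerateParam; the check_* helper below is a
    hypothetical simplification of the ComponentParamBase validators."""

    def __init__(self):
        # 0 now means "not set by the user"; the old defaults (256, 0.1, ...)
        # were always forwarded to the model even when the user never set them.
        self.max_tokens = 0
        self.temperature = 0
        self.top_p = 0

    def check_nonnegative_number(self, value, descr):
        # The "[Generate] " prefix in `descr` is what makes the error message
        # specific: it names the component that failed validation.
        if not isinstance(value, (int, float)) or value < 0:
            raise ValueError(f"{descr} should be a non-negative number")

    def check(self):
        self.check_nonnegative_number(self.max_tokens, "[Generate] Max tokens")

    def gen_conf(self):
        # Only forward knobs the user explicitly set; unset ones are omitted
        # so the provider's defaults apply.
        conf = {}
        if self.max_tokens > 0: conf["max_tokens"] = self.max_tokens
        if self.temperature > 0: conf["temperature"] = self.temperature
        if self.top_p > 0: conf["top_p"] = self.top_p
        return conf

p = GenerateParamSketch()
p.temperature = 0.1
p.check()
print(p.gen_conf())  # {'temperature': 0.1} -- unset knobs are omitted
```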
```diff
@@ -69,12 +69,15 @@ class Generate(ComponentBase):
         retrieval_res = self.get_input()
         input = "\n- ".join(retrieval_res["content"])
         for para in self._param.parameters:
             cpn = self._canvas.get_component(para["component_id"])["obj"]
             _, out = cpn.output(allow_partial=False)
             kwargs[para["key"]] = "\n - ".join(out["content"])
         kwargs["input"] = input
         for n, v in kwargs.items():
-            #prompt = re.sub(r"\{%s\}"%n, re.escape(str(v)), prompt)
-            prompt = re.sub(r"\{%s\}"%n, str(v), prompt)
+            # prompt = re.sub(r"\{%s\}"%n, re.escape(str(v)), prompt)
+            prompt = re.sub(r"\{%s\}" % n, str(v), prompt)

         if kwargs.get("stream"):
             return partial(self.stream_output, chat_mdl, prompt, retrieval_res)
```
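For reference, the substitution loop fills `{name}` placeholders in the prompt template with the output of upstream components. A toy run (placeholder names are illustrative):

```python
import re

prompt = "Answer using this context:\n{input}\nQuestion: {question}"
kwargs = {"input": "- chunk A\n- chunk B",
          "question": "What does the Generate component do?"}

for n, v in kwargs.items():
    # Same substitution as in the component: each {name} slot is replaced.
    prompt = re.sub(r"\{%s\}" % n, str(v), prompt)

print(prompt)
```

The escaped variant is kept only as a comment: plain `str(v)` keeps the substituted text readable, at the cost of `re.sub` interpreting any backslash sequences the value happens to contain.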
```diff
@@ -82,23 +85,25 @@ class Generate(ComponentBase):
         if "empty_response" in retrieval_res.columns:
             return Generate.be_output(input)

-        ans = chat_mdl.chat(prompt, self._canvas.get_history(self._param.message_history_window_size), self._param.gen_conf())
+        ans = chat_mdl.chat(prompt, self._canvas.get_history(self._param.message_history_window_size),
+                            self._param.gen_conf())

         if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
             ans, idx = retrievaler.insert_citations(ans,
-                                                    [ck["content_ltks"]
-                                                     for _, ck in retrieval_res.iterrows()],
-                                                    [ck["vector"]
-                                                     for _,ck in retrieval_res.iterrows()],
-                                                    LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING, self._canvas.get_embedding_model()),
-                                                    tkweight=0.7,
-                                                    vtweight=0.3)
+                                                    [ck["content_ltks"]
+                                                     for _, ck in retrieval_res.iterrows()],
+                                                    [ck["vector"]
+                                                     for _, ck in retrieval_res.iterrows()],
+                                                    LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING,
+                                                              self._canvas.get_embedding_model()),
+                                                    tkweight=0.7,
+                                                    vtweight=0.3)
             del retrieval_res["vector"]
             retrieval_res = retrieval_res.to_dict("records")
             df = []
             for i in idx:
                 df.append(retrieval_res[int(i)])
-                r = re.search(r"^((.|[\r\n])*? ##%s\$\$)"%str(i), ans)
+                r = re.search(r"^((.|[\r\n])*? ##%s\$\$)" % str(i), ans)
                 assert r, f"{i} => {ans}"
                 df[-1]["content"] = r.group(1)
                 ans = re.sub(r"^((.|[\r\n])*? ##%s\$\$)" % str(i), "", ans)
```
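The loop above relies on `insert_citations` having embedded markers of the form ` ##<i>$$` into the answer text: each pass captures everything up to and including marker `i`, attaches that span to the i-th recalled chunk, then strips it from the front of the string. A toy illustration of the marker parsing (the marker format is inferred from the regex; `insert_citations` itself is not reproduced here):

```python
import re

# A fake answer with two citation markers, as insert_citations might emit them.
ans = "RAGFlow retrieves chunks first ##0$$ then cites them in the answer ##1$$"

for i in [0, 1]:
    pattern = r"^((.|[\r\n])*? ##%s\$\$)" % i
    r = re.search(pattern, ans)
    assert r, f"{i} => {ans}"           # same invariant the component asserts
    print(f"chunk {i}: {r.group(1)!r}")
    ans = re.sub(pattern, "", ans)      # peel the consumed span off the front
```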
```diff
@@ -116,20 +121,22 @@ class Generate(ComponentBase):
             return

         answer = ""
-        for ans in chat_mdl.chat_streamly(prompt, self._canvas.get_history(self._param.message_history_window_size), self._param.gen_conf()):
+        for ans in chat_mdl.chat_streamly(prompt, self._canvas.get_history(self._param.message_history_window_size),
+                                          self._param.gen_conf()):
             res = {"content": ans, "reference": []}
             answer = ans
             yield res

         if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
             answer, idx = retrievaler.insert_citations(answer,
-                                                       [ck["content_ltks"]
-                                                        for _, ck in retrieval_res.iterrows()],
-                                                       [ck["vector"]
-                                                        for _, ck in retrieval_res.iterrows()],
-                                                       LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING, self._canvas.get_embedding_model()),
-                                                       tkweight=0.7,
-                                                       vtweight=0.3)
+                                                       [ck["content_ltks"]
+                                                        for _, ck in retrieval_res.iterrows()],
+                                                       [ck["vector"]
+                                                        for _, ck in retrieval_res.iterrows()],
+                                                       LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING,
+                                                                 self._canvas.get_embedding_model()),
+                                                       tkweight=0.7,
+                                                       vtweight=0.3)
             doc_ids = set([])
             recall_docs = []
             for i in idx:
```
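`stream_output` is what the non-blocking branch returns via `partial(...)` earlier: it yields each cumulative partial answer as a `{"content", "reference"}` record, and only after the stream is exhausted does it run citation insertion on the final text. A minimal sketch of that generator shape, with a fake stand-in for `chat_mdl.chat_streamly`:

```python
from functools import partial

def fake_chat_streamly(prompt, history, gen_conf):
    """Hypothetical stand-in for chat_mdl.chat_streamly:
    yields cumulative partial answers."""
    for ans in ["RAGFlow", "RAGFlow is a RAG", "RAGFlow is a RAG engine."]:
        yield ans

def stream_output(chat_fn, prompt):
    answer = ""
    for ans in chat_fn(prompt, history=[], gen_conf={}):
        answer = ans                           # keep the latest (cumulative) answer
        yield {"content": ans, "reference": []}
    # The stream has ended: `answer` now holds the full text,
    # which is where the real component inserts citations.

stream = partial(stream_output, fake_chat_streamly, "What is RAGFlow?")
for res in stream():
    print(res["content"])
```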
```diff
@@ -152,5 +159,3 @@ class Generate(ComponentBase):
         yield res

         self.set_output(res)
```