从新提交到gitee 仓库

This commit is contained in:
qcloud
2025-02-06 23:34:26 +08:00
parent e678819f70
commit c88312a914
62 changed files with 211935 additions and 7500 deletions

3
.gitignore vendored
View File

@@ -36,3 +36,6 @@ sdk/python/ragflow.egg-info/
sdk/python/build/
sdk/python/dist/
sdk/python/ragflow_sdk.egg-info/
# Exclude virtual env
.venv/

File diff suppressed because it is too large Load Diff

Binary file not shown.

View File

@@ -145,6 +145,7 @@ def load_user(web_request):
if authorization:
try:
access_token = str(jwt.loads(authorization))
logging.info(f"--@login_manager.request_loader {access_token} {authorization}") # cyx
user = UserService.query(
access_token=access_token, status=StatusEnum.VALID.value
)

View File

@@ -16,6 +16,7 @@
import json
import re
import traceback
import logging
from copy import deepcopy
from api.db.services.user_service import UserTenantService
from flask import request, Response
@@ -126,12 +127,37 @@ def list_convsersation():
return get_json_result(
data=False, message='Only owner of dialog authorized for this operation.',
code=settings.RetCode.OPERATING_ERROR)
convs = ConversationService.query(
dialog_id=dialog_id,
order_by=ConversationService.model.create_time,
reverse=True)
convs = [d.to_dict() for d in convs]
return get_json_result(data=convs)
if 0:
# 20250127 cyx 修改,如果不限定返回的行数,内容太大了,会堵塞上传
convs = ConversationService.query(
dialog_id=dialog_id,
order_by=ConversationService.model.create_time,
reverse=True)
convs = [d.to_dict() for d in convs]
else:
id = request.args.get("id")
name = request.args.get("name")
page_number = int(request.args.get("current_page", 1))
items_per_page = int(request.args.get("page_size", 40))
orderby = request.args.get("orderby", "create_time")
if request.args.get("desc") == "False" or request.args.get("desc") == "false":
desc = False
else:
desc = True
# 获取模型的所有字段
all_fields = ConversationService.model._meta.sorted_field_names
# 定义需要排除的字段
exclude_fields = {"message", "reference"}
# 动态生成 columns排除指定字段
cols = [field for field in all_fields if field not in exclude_fields]
total, convs = ConversationService.get_list(dialog_id, page_number, items_per_page, orderby, desc, id, name ,cols)
for conv in convs:
conv['name'] = f"{conv['name'] } {conv['update_date']}"
# logging.info(f"list_convsersation--{dialog_id} return {len(convs)}") # cyx
return get_json_result(data={'total':total,'data':convs})
except Exception as e:
return server_error_response(e)

View File

@@ -312,7 +312,7 @@ def delete_factory():
[TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == req["llm_factory"]])
return get_json_result(data=True)
# 用户已经添加的模型 cyx 2025-01-26
@manager.route('/my_llms', methods=['GET'])
@login_required
def my_llms():
@@ -342,11 +342,13 @@ def list_app():
model_type = request.args.get("model_type")
try:
objs = TenantLLMService.query(tenant_id=current_user.id)
# 在添加模型时会设置url 和 api_key 并且会检查访问有效性 cyx
facts = set([o.to_dict()["llm_factory"] for o in objs if o.api_key])
llms = LLMService.get_all()
llms = [m.to_dict()
for m in llms if m.status == StatusEnum.VALID.value and m.fid not in weighted]
for m in llms:
# 设置模型是否可以访问 cyx
m["available"] = m["fid"] in facts or m["llm_name"].lower() == "flag-embedding" or m["fid"] in self_deploied
llm_set = set([m["llm_name"] + "@" + m["fid"] for m in llms])
@@ -362,7 +364,6 @@ def list_app():
if m["fid"] not in res:
res[m["fid"]] = []
res[m["fid"]].append(m)
return get_json_result(data=res)
except Exception as e:
return server_error_response(e)

View File

@@ -0,0 +1,51 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import request
from api import settings
from api.db import StatusEnum
from api.db.services.dialog_service import DialogService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import TenantLLMService
from api.db.services.user_service import TenantService
from api.utils import get_uuid
from api.utils.api_utils import get_error_data_result, token_required
from api.utils.api_utils import get_result
# 用户已经添加的模型 cyx 2025-01-26
@manager.route('/get_llms', methods=['GET'])
@token_required
def my_llms(tenant_id):
# request.args.get("id") 通过request.args.get 获取GET 方法传入的参数
model_type = request.args.get("type")
try:
res = {}
for o in TenantLLMService.get_my_llms(tenant_id):
if model_type is None or o["model_type"] == model_type: # 增加按类型的筛选
if o["llm_factory"] not in res:
res[o["llm_factory"]] = {
"tags": o["tags"],
"llm": []
}
res[o["llm_factory"]]["llm"].append({
"type": o["model_type"],
"name": o["llm_name"],
"used_token": o["used_tokens"]
})
return get_result(data=res)
except Exception as e:
return get_error_data_result(message=f"Get LLMS error {e}")

View File

@@ -13,12 +13,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import re, io
import json
import logging
from copy import deepcopy
from uuid import uuid4
from api.db import LLMType
from flask import request, Response
from flask import request, Response, jsonify
from api.db.services.dialog_service import ask
from agent.canvas import Canvas
from api.db import StatusEnum
@@ -31,11 +32,13 @@ from api.utils import get_uuid
from api.utils.api_utils import get_error_data_result
from api.utils.api_utils import get_result, token_required
from api.db.services.llm_service import LLMBundle
import uuid
import queue
@manager.route('/chats/<chat_id>/sessions', methods=['POST'])
@token_required
def create(tenant_id,chat_id):
def create(tenant_id, chat_id):
req = request.json
req["dialog_id"] = chat_id
dia = DialogService.query(tenant_id=tenant_id, id=req["dialog_id"], status=StatusEnum.VALID.value)
@@ -77,7 +80,7 @@ def create_agent_session(tenant_id, agent_id):
conv = {
"id": get_uuid(),
"dialog_id": cvs.id,
"user_id": req.get("usr_id","") if isinstance(req, dict) else "",
"user_id": req.get("usr_id", "") if isinstance(req, dict) else "",
"message": [{"role": "assistant", "content": canvas.get_prologue()}],
"source": "agent"
}
@@ -88,11 +91,11 @@ def create_agent_session(tenant_id, agent_id):
@manager.route('/chats/<chat_id>/sessions/<session_id>', methods=['PUT'])
@token_required
def update(tenant_id,chat_id,session_id):
def update(tenant_id, chat_id, session_id):
req = request.json
req["dialog_id"] = chat_id
conv_id = session_id
conv = ConversationService.query(id=conv_id,dialog_id=chat_id)
conv = ConversationService.query(id=conv_id, dialog_id=chat_id)
if not conv:
return get_error_data_result(message="Session does not exist")
if not DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
@@ -110,9 +113,9 @@ def update(tenant_id,chat_id,session_id):
@manager.route('/chats/<chat_id>/completions', methods=['POST'])
@token_required
def completion(tenant_id, chat_id):
def completion(tenant_id, chat_id): # chat_id 和 别的文件中的dialog_id 应该是一个意思? cyx 2025-01-25
req = request.json
if not req.get("session_id"):
if not req.get("session_id"): # session_id 和 别的文件中的conversation_id 应该是一个意思? cyx 2025-01-25
conv = {
"id": get_uuid(),
"dialog_id": chat_id,
@@ -123,12 +126,18 @@ def completion(tenant_id, chat_id):
return get_error_data_result(message="`name` can not be empty.")
ConversationService.save(**conv)
e, conv = ConversationService.get_by_id(conv["id"])
session_id=conv.id
session_id = conv.id
else:
session_id = req.get("session_id")
if not req.get("question"):
return get_error_data_result(message="Please input your question.")
conv = ConversationService.query(id=session_id,dialog_id=chat_id)
#conv = ConversationService.query(id=session_id, dialog_id=chat_id)
# 以下改动是为了限制从历史记录中取过多的记录
history_limit = req.get("history_limit", None)
if history_limit is not None:
conv = ConversationService.query(id=session_id, dialog_id=chat_id, reverse=True, order_by="create_time")
else:
conv = ConversationService.query(id=session_id, dialog_id=chat_id)
if not conv:
return get_error_data_result(message="Session does not exist")
conv = conv[0]
@@ -141,13 +150,25 @@ def completion(tenant_id, chat_id):
"id": str(uuid4())
}
conv.message.append(question)
# 第一次遍历,计算 assistant 消息的总数
assistant_total_count = sum(1 for m in conv.message if m["role"] == "assistant")
# 第二次遍历,按条件添加消息到 msg
current_assistant_count = 0 # 跟踪当前添加的 assistant 消息数
for m in conv.message:
if m["role"] == "system": continue
if m["role"] == "assistant" and not msg: continue
if m['role'] == "assistant":
# 如果 assistant 消息超出需要保留的数量,跳过
# 检查 history_limit 是否为 NoneNone 表示不限制
if history_limit is not None and current_assistant_count < assistant_total_count - history_limit:
current_assistant_count += 1
continue
msg.append(m)
message_id = msg[-1].get("id")
e, dia = DialogService.get_by_id(conv.dialog_id)
logging.info(f"/chats/{chat_id}/completions req={req}--dale --2 history_limit={history_limit} dia {dia}") # cyx
if not conv.reference:
conv.reference = []
conv.message.append({"role": "assistant", "content": "", "id": message_id})
@@ -182,19 +203,22 @@ def completion(tenant_id, chat_id):
chunk_list.append(new_chunk)
reference["chunks"] = chunk_list
ans["id"] = message_id
ans["session_id"]=session_id
ans["session_id"] = session_id
def stream():
nonlocal dia, msg, req, conv
try:
for ans in chat(dia, msg, **req):
fillin_conv(ans)
yield "data:" + json.dumps({"code": 0, "data": ans}, ensure_ascii=False) + "\n\n"
yield "data:" + json.dumps({"code": 0, "data": ans}, ensure_ascii=False) + "\n\n"
ConversationService.update_by_id(conv.id, conv.to_dict())
except Exception as e:
yield "data:" + json.dumps({"code": 500, "message": str(e),
"data": {"answer": "**ERROR**: " + str(e),"reference": []}},
ensure_ascii=False) + "\n\n"
logging.info(f"sessions--3 /chats/<chat_id>/completions error {e} ") # cyx
# yield "data:" + json.dumps({"code": 500, "message": str(e),
# "data": {"answer": "**ERROR**: " + str(e),"reference": []}},
# ensure_ascii=False) + "\n\n"
# cyx 2024 12 04 不把错误返回给前端
yield "data:" + json.dumps({"code": 0, "data": True}, ensure_ascii=False) + "\n\n"
if req.get("stream", True):
@@ -216,6 +240,185 @@ def completion(tenant_id, chat_id):
return get_result(data=answer)
# 全角字符到半角字符的映射
def fullwidth_to_halfwidth(s):
full_to_half_map = {
'': '!', '': '"', '': '#', '': '$', '': '%', '': '&', '': "'",
'': '(', '': ')', '': '*', '': '+', '': ',', '': '-', '': '.',
'': '/', '': ':', '': ';', '': '<', '': '=', '': '>', '': '?',
'': '@', '': '[', '': '\\', '': ']', '': '^', '_': '_', '': '`',
'': '{', '': '|', '': '}', '': '~', '': '', '': '', '': '',
'': '', '': ',', '': '.', '': '-', '': '.', '': '', '': '',
'': '', '': '', '': ':'
}
return ''.join(full_to_half_map.get(char, char) for char in s)
def is_dale(s):
full_to_half_map = {
'': '!', '': '"', '': '#', '': '$', '': '%', '': '&', '': "'",
'': '(', '': ')', '': '*', '': '+', '': ',', '': '-', '': '.',
'': '/', '': ':', '': ';', '': '<', '': '=', '': '>', '': '?',
'': '@', '': '[', '': '\\', '': ']', '': '^', '_': '_', '': '`',
'': '{', '': '|', '': '}', '': '~', '': '', '': '', '': '',
'': '', '': ',', '': '.', '': '-', '': '.', '': '', '': '',
'': '', '': '', '': ':', '': '.'
}
def extract_text_from_markdown(markdown_text):
# 移除Markdown标题
text = re.sub(r'#\s*[^#]+', '', markdown_text)
# 移除内联代码块
text = re.sub(r'`[^`]+`', '', text)
# 移除代码块
text = re.sub(r'```[\s\S]*?```', '', text)
# 移除加粗和斜体
text = re.sub(r'[*_]{1,3}(?=\S)(.*?\S[*_]{1,3})', '', text)
# 移除链接
text = re.sub(r'\[.*?\]\(.*?\)', '', text)
# 移除图片
text = re.sub(r'!\[.*?\]\(.*?\)', '', text)
# 移除HTML标签
text = re.sub(r'<[^>]+>', '', text)
# 转换标点符号
# text = re.sub(r'[^\w\s]', '', text)
text = fullwidth_to_halfwidth(text)
# 移除多余的空格
text = re.sub(r'\s+', ' ', text).strip()
return text
def split_text_at_punctuation(text, chunk_size=100):
# 使用正则表达式找到所有的标点符号和特殊字符
punctuation_pattern = r'[\s,.!?;:\-\\(\)\[\]{}"\'\\\/]+'
tokens = re.split(punctuation_pattern, text)
# 移除空字符串
tokens = [token for token in tokens if token]
# 存储最终的文本块
chunks = []
current_chunk = ''
for token in tokens:
if len(current_chunk) + len(token) <= chunk_size:
# 如果添加当前token后长度不超过chunk_size则添加到当前块
current_chunk += (token + ' ')
else:
# 如果长度超过chunk_size则将当前块添加到chunks列表并开始新块
chunks.append(current_chunk.strip())
current_chunk = token + ' '
# 添加最后一个块(如果有剩余)
if current_chunk:
chunks.append(current_chunk.strip())
return chunks
audio_text_cache = {}
@manager.route('/chats/<chat_id>/tts/<audio_stream_id>', methods=['GET'])
def dialog_tts_get(chat_id, audio_stream_id):
tts_info = audio_text_cache.pop(audio_stream_id, None)
req = tts_info
if not req:
return get_error_data_result(message="Audio stream not found or expired.")
audio_stream = req.get('audio_stream')
tenant_id = req.get('tenant_id')
chat_id = req.get('chat_id')
text = req.get('text', "..")
model_name = req.get('model_name')
dia = DialogService.get(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value)
if not dia:
return get_error_data_result(message="You do not own the chat")
tts_model_name = dia.tts_id
if model_name: tts_model_name = model_name
tts_mdl = LLMBundle(dia.tenant_id, LLMType.TTS, tts_model_name) # dia.tts_id)
def stream_audio():
try:
for chunk in tts_mdl.tts(text):
yield chunk
except Exception as e:
yield ("data:" + json.dumps({"code": 500, "message": str(e),
"data": {"answer": "**ERROR**: " + str(e)}},
ensure_ascii=False)).encode('utf-8')
def generate():
data = audio_stream.read(1024)
while data:
yield data
data = audio_stream.read(1024)
if audio_stream:
# 确保流的位置在开始处
audio_stream.seek(0)
resp = Response(generate(), mimetype="audio/mpeg")
else:
resp = Response(stream_audio(), mimetype="audio/mpeg")
resp.headers.add_header("Cache-Control", "no-cache")
resp.headers.add_header("Connection", "keep-alive")
resp.headers.add_header("X-Accel-Buffering", "no")
return resp
@manager.route('/chats/<chat_id>/tts', methods=['POST'])
@token_required
def dialog_tts_post(tenant_id, chat_id):
req = request.json
if not req.get("text"):
return get_error_data_result(message="Please input your question.")
delay_gen_audio = req.get('delay_gen_audio', False)
# text = extract_text_from_markdown(req.get('text'))
text = req.get('text')
audio_stream_id = req.get('audio_stream_id')
# logging.info(f"request tts audio url:{text} audio_stream_id:{audio_stream_id} ")
if audio_stream_id is None:
audio_stream_id = str(uuid.uuid4())
# 在这里生成音频流并存储到内存中
model_name = req.get('model_name')
dia = DialogService.get(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value)
tts_model_name = dia.tts_id
if model_name: tts_model_name = model_name
logging.info(f"---tts {tts_model_name}")
tts_mdl = LLMBundle(dia.tenant_id, LLMType.TTS, tts_model_name) # dia.tts_id)
if delay_gen_audio:
audio_stream = None
else:
audio_stream = io.BytesIO()
audio_text_cache[audio_stream_id] = {'text': text, 'chat_id': chat_id, "tenant_id": tenant_id,
'audio_stream': audio_stream,'model_name':model_name} # 缓存文本以便后续生成音频流
if delay_gen_audio is False:
try:
"""
for txt in re.split(r"[,。/《》?;:!\n\r:;]+", text):
try:
if txt is None or txt.strip() == "":
continue
for chunk in tts_mdl.tts(txt):
audio_stream.write(chunk)
except Exception as e:
continue
"""
if text is None or text.strip() == "":
audio_stream.write(b'\x00' * 100)
else:
for chunk in tts_mdl.tts(text):
audio_stream.write(chunk)
except Exception as e:
return get_error_data_result(message="get tts audio stream error.")
# 构建音频流URL
audio_stream_url = f"/chats/{chat_id}/tts/{audio_stream_id}"
logging.info(f"--return request tts audio url {audio_stream_id} {audio_stream_url}")
# 返回音频流URL
return jsonify({"tts_url": audio_stream_url, "audio_stream_id": audio_stream_id})
@manager.route('/agents/<agent_id>/completions', methods=['POST'])
@token_required
def agent_completion(tenant_id, agent_id):
@@ -235,7 +438,7 @@ def agent_completion(tenant_id, agent_id):
conv = {
"id": session_id,
"dialog_id": cvs.id,
"user_id": req.get("user_id",""),
"user_id": req.get("user_id", ""),
"message": [{"role": "assistant", "content": canvas.get_prologue()}],
"source": "agent"
}
@@ -251,9 +454,9 @@ def agent_completion(tenant_id, agent_id):
question = req.get("question")
if not question:
return get_error_data_result("`question` is required.")
question={
"role":"user",
"content":question,
question = {
"role": "user",
"content": question,
"id": str(uuid4())
}
messages.append(question)
@@ -308,6 +511,7 @@ def agent_completion(tenant_id, agent_id):
if 'docnm_kwd' in chunk_i:
chunk_i['doc_name'] = chunk_i['docnm_kwd']
chunk_i.pop('docnm_kwd')
conv.message.append(msg[-1])
if not conv.reference:
@@ -375,9 +579,26 @@ def agent_completion(tenant_id, agent_id):
return get_result(data=result)
# added by cyx
# 打印 ConversationService.model 的表名及字段定义
def print_table_info(service):
model = service.model # 获取关联的模型
if model is None:
print("No model associated with the service.")
return
# 打印表名
logging.info(f"Table Name: {model._meta.table_name}")
# 打印所有字段及其定义
logging.info("Fields and Definitions:")
for field_name, field in model._meta.fields.items():
print(f" {field_name}: {field}")
@manager.route('/chats/<chat_id>/sessions', methods=['GET'])
@token_required
def list_session(chat_id,tenant_id):
def list_session(chat_id, tenant_id):
if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
return get_error_data_result(message=f"You don't own the assistant {chat_id}.")
id = request.args.get("id")
@@ -389,7 +610,8 @@ def list_session(chat_id,tenant_id):
desc = False
else:
desc = True
convs = ConversationService.get_list(chat_id,page_number,items_per_page,orderby,desc,id,name)
print_table_info(ConversationService) # cyx
convs = ConversationService.get_list(chat_id, page_number, items_per_page, orderby, desc, id, name)
if not convs:
return get_result(data=[])
for conv in convs:
@@ -429,9 +651,33 @@ def list_session(chat_id,tenant_id):
return get_result(data=convs)
# added by cyx 20241201
@manager.route('/chats/<chat_id>/sessions_summary', methods=['GET'])
@token_required
def sessions_summary(chat_id, tenant_id):
# 校验用户是否拥有指定的会话助手
if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
return get_error_data_result(message=f"You don't own the assistant {chat_id}.")
# 统计会话概要信息
summaries = ConversationService.query_sessions_summary()
# 过滤结果,仅返回属于指定 chat_id 的记录
filtered_summaries = [
summary for summary in summaries if summary["dialog_id"] == chat_id
]
# 如果没有符合条件的记录,返回空列表
if not filtered_summaries:
return get_result(data=[])
# 返回过滤后的概要信息
return get_result(data=filtered_summaries)
@manager.route('/chats/<chat_id>/sessions', methods=["DELETE"])
@token_required
def delete(tenant_id,chat_id):
def delete(tenant_id, chat_id):
if not DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
return get_error_data_result(message="You don't own the chat")
req = request.json
@@ -439,21 +685,22 @@ def delete(tenant_id,chat_id):
if not req:
ids = None
else:
ids=req.get("ids")
ids = req.get("ids")
if not ids:
conv_list = []
for conv in convs:
conv_list.append(conv.id)
else:
conv_list=ids
conv_list = ids
for id in conv_list:
conv = ConversationService.query(id=id,dialog_id=chat_id)
conv = ConversationService.query(id=id, dialog_id=chat_id)
if not conv:
return get_error_data_result(message="The chat doesn't own the session")
ConversationService.delete_by_id(id)
return get_result()
@manager.route('/sessions/ask', methods=['POST'])
@token_required
def ask_about(tenant_id):
@@ -462,17 +709,18 @@ def ask_about(tenant_id):
return get_error_data_result("`question` is required.")
if not req.get("dataset_ids"):
return get_error_data_result("`dataset_ids` is required.")
if not isinstance(req.get("dataset_ids"),list):
if not isinstance(req.get("dataset_ids"), list):
return get_error_data_result("`dataset_ids` should be a list.")
req["kb_ids"]=req.pop("dataset_ids")
req["kb_ids"] = req.pop("dataset_ids")
for kb_id in req["kb_ids"]:
if not KnowledgebaseService.accessible(kb_id,tenant_id):
if not KnowledgebaseService.accessible(kb_id, tenant_id):
return get_error_data_result(f"You don't own the dataset {kb_id}.")
kbs = KnowledgebaseService.query(id=kb_id)
kb = kbs[0]
if kb.chunk_num == 0:
return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
uid = tenant_id
def stream():
nonlocal req, uid
try:

View File

@@ -912,6 +912,12 @@ class Dialog(DataBaseModel):
help_text="is it validate(0: wasted, 1: validate)",
default="1",
index=True)
# tts_id added by cyx 为每一个对话助理设置相应的tts
tts_id = CharField(
max_length=256,
null=True,
help_text="default tts model ID",
index=True)
class Meta:
db_table = "dialog"

View File

@@ -32,6 +32,37 @@ from rag.app.resume import forbidden_select_fields4resume
from rag.nlp.search import index_name
from rag.utils import rmSpace, num_tokens_from_string, encoder
from api.utils.file_utils import get_project_base_directory
from peewee import fn
import threading, queue
# 创建一个 TTS 生成线程
class TTSWorker(threading.Thread):
def __init__(self, tenant_id, tts_id, tts_text_queue, tts_audio_queue):
super().__init__()
self.tts_mdl = LLMBundle(tenant_id, LLMType.TTS, tts_id)
self.tts_text_queue = tts_text_queue
self.tts_audio_queue = tts_audio_queue
self.daemon = True # 设置为守护线程,主线程退出时,子线程也会自动退出
def run(self):
while True:
# 从队列中获取数据
delta_ans = self.tts_text_queue.get()
if delta_ans is None: # 如果队列中没有数据,退出线程
break
try:
# 调用 TTS 生成音频数据
tts_input_is_valid, sanitized_text = validate_and_sanitize_tts_input(delta_ans)
if tts_input_is_valid:
logging.info(f"--tts threading {delta_ans} {tts_input_is_valid} {sanitized_text}")
bin = b""
for chunk in self.tts_mdl.tts(sanitized_text):
bin += chunk
# 将生成的音频数据存储到队列中或直接处理
self.tts_audio_queue.put(bin)
except Exception as e:
logging.error(f"Error generating TTS for text '{delta_ans}': {e}")
class DialogService(CommonService):
@@ -65,22 +96,61 @@ class ConversationService(CommonService):
@classmethod
@DB.connection_context()
def get_list(cls,dialog_id,page_number, items_per_page, orderby, desc, id , name):
sessions = cls.model.select().where(cls.model.dialog_id ==dialog_id)
def get_list(cls,dialog_id,page_number, items_per_page, orderby, desc, id, name, cols=None):
# 构建基础查询
print("--ConversationService get_list enter", page_number, items_per_page) # cyx
query = cls.model.select().where(cls.model.dialog_id == dialog_id)
# 如果指定了ID则添加ID筛选
if id:
sessions = sessions.where(cls.model.id == id)
query = query.where(cls.model.id == id)
# 如果指定了名称,则添加名称筛选
if name:
sessions = sessions.where(cls.model.name == name)
query = query.where(cls.model.name == name)
# 如果指定了列筛选,则只选择指定的列
if cols:
query = query.select(*[getattr(cls.model, col) for col in cols])
# 获取记录总数
total = query.count()
# 添加排序
if desc:
sessions = sessions.order_by(cls.model.getter_by(orderby).desc())
query = query.order_by(cls.model.getter_by(orderby).desc())
else:
sessions = sessions.order_by(cls.model.getter_by(orderby).asc())
query = query.order_by(cls.model.getter_by(orderby).asc())
sessions = sessions.paginate(page_number, items_per_page)
# 执行分页查询
paginated_query = query.paginate(page_number, items_per_page)
data = list(paginated_query.dicts())
# logging.info("--ConversationService get_list",total, data) #cyx
# 返回分页数据和记录总数
return total, data
return list(sessions.dicts())
@classmethod
@DB.connection_context()
def query_sessions_summary(cls):
# 按 id 分组,统计每个 id 的最旧记录
query = (
cls.model
.select(
cls.model.id,
cls.model.dialog_id,
cls.model.name,
fn.MIN(cls.model.create_time).alias("create_time"),
fn.MIN(cls.model.create_date).alias("create_date")
)
.group_by(cls.model.id, cls.model.dialog_id, cls.model.name)
.order_by(
fn.MIN(cls.model.create_time).desc(),
)
)
# 转换为字典列表返回
return list(query.dicts())
def message_fit_in(msg, max_length=4000):
def count():
nonlocal msg
@@ -128,6 +198,42 @@ def llm_id2llm_type(llm_id):
if llm_id == llm["llm_name"]:
return llm["model_type"].strip(",")[-1]
# cyx 2024 12 04
# 用于校验和修正语音合成的输入文本。该函数会去除非法字符、修正内容,并返回一个结果:包括是否有效和修正后的文本。
def validate_and_sanitize_tts_input(delta_ans, max_length=3000):
"""
检验并修正语音合成的输入文本。
Args:
delta_ans (str): 输入的待校验文本。
max_length (int): 文本允许的最大长度。
Returns:
tuple: (is_valid, sanitized_text)
- is_valid (bool): 文本是否有效。
- sanitized_text (str): 修正后的文本(如果无效,为空字符串)。
"""
# 1. 确保输入为字符串
if not isinstance(delta_ans, str):
return False, ""
# 2. 去除前后空白并检查是否为空
delta_ans = delta_ans.strip()
if len(delta_ans) == 0:
return False, ""
# 3. 替换全角符号为半角
delta_ans = re.sub(r'[]', '?', delta_ans)
# 4. 移除非法字符(仅保留中文、英文、数字及常见标点符号)
delta_ans = re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9\s,.!?\'";;。,!?:”“()\-()]', '', delta_ans)
# 5. 检查长度
if len(delta_ans) == 0 or len(delta_ans) > max_length:
return False, ""
# 如果通过所有检查,返回有效标志和修正后的文本
return True, delta_ans
def chat(dialog, messages, stream=True, **kwargs):
assert messages[-1]["role"] == "user", "The last content of this conversation is not from user."
@@ -175,8 +281,10 @@ def chat(dialog, messages, stream=True, **kwargs):
prompt_config = dialog.prompt_config
field_map = KnowledgebaseService.get_field_map(dialog.kb_ids)
tts_mdl = None
if prompt_config.get("tts"):
tts_mdl = LLMBundle(dialog.tenant_id, LLMType.TTS)
tts_mdl = LLMBundle(dialog.tenant_id, LLMType.TTS,dialog.tts_id)
# try to use sql if field mapping is good to go
if field_map:
logging.debug("Use SQL to retrieval:{}".format(questions[-1]))
@@ -184,7 +292,7 @@ def chat(dialog, messages, stream=True, **kwargs):
if ans:
yield ans
return
# logging.info(f"dialog_service--1 chat prompt_config{prompt_config['parameters']} {prompt_config}") # cyx
for p in prompt_config["parameters"]:
if p["key"] == "knowledge":
continue
@@ -223,6 +331,7 @@ def chat(dialog, messages, stream=True, **kwargs):
knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]
logging.debug(
"{}->{}".format(" ".join(questions), "\n->".join(knowledges)))
retrieval_tm = timer()
if not knowledges and prompt_config.get("empty_response"):
@@ -245,7 +354,6 @@ def chat(dialog, messages, stream=True, **kwargs):
gen_conf["max_tokens"] = min(
gen_conf["max_tokens"],
max_tokens - used_token_count)
def decorate_answer(answer):
nonlocal prompt_config, knowledges, kwargs, kbinfos, prompt, retrieval_tm
refs = []
@@ -281,22 +389,44 @@ def chat(dialog, messages, stream=True, **kwargs):
last_ans = ""
answer = ""
for ans in chat_mdl.chat_streamly(prompt, msg[1:], gen_conf):
answer = ans
delta_ans = ans[len(last_ans):]
if num_tokens_from_string(delta_ans) < 16:
continue
last_ans = answer
yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
# yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
# cyx 2024 12 04 修正delta_ans 为空 ,调用tts 出错
tts_input_is_valid, sanitized_text = validate_and_sanitize_tts_input(delta_ans)
#if kwargs.get('tts_disable'): # cyx 2025 01 18 前端传入tts_disable 参数就不生成tts 音频给前端,即:没有audio_binary
tts_input_is_valid = False
if tts_input_is_valid:
yield {"answer": answer, "delta_ans": sanitized_text, "reference": {}, "audio_binary": tts(tts_mdl, sanitized_text)}
else:
yield {"answer": answer, "delta_ans": sanitized_text, "reference": {}}
delta_ans = answer[len(last_ans):]
if delta_ans:
yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
# yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
# cyx 2024 12 04 修正delta_ans 为空调用tts 出错
tts_input_is_valid, sanitized_text = validate_and_sanitize_tts_input(delta_ans)
#if kwargs.get('tts_disable'): # cyx 2025 01 18 前端传入tts_disable 参数就不生成tts 音频给前端,即:没有audio_binary
tts_input_is_valid = False
if tts_input_is_valid:
yield {"answer": answer, "delta_ans": sanitized_text,"reference": {}, "audio_binary": tts(tts_mdl, sanitized_text)}
else:
yield {"answer": answer, "delta_ans": sanitized_text,"reference": {}}
yield decorate_answer(answer)
else:
answer = chat_mdl.chat(prompt, msg[1:], gen_conf)
logging.debug("User: {}|Assistant: {}".format(
msg[-1]["content"], answer))
res = decorate_answer(answer)
res["audio_binary"] = tts(tts_mdl, answer)
if kwargs.get('tts_disable'): # cyx 2025 01 18 前端传入tts_disable 参数就不生成tts 音频给前端,即:没有audio_binary
tts_input_is_valid = False
else:
res["audio_binary"] = tts(tts_mdl, answer)
yield res

View File

@@ -140,6 +140,7 @@ class TenantLLMService(CommonService):
if llm_type == LLMType.TTS:
if model_config["llm_factory"] not in TTSModel:
return
# 初始化 tts model cyx
return TTSModel[model_config["llm_factory"]](
model_config["api_key"],
model_config["llm_name"],
@@ -201,6 +202,8 @@ class LLMBundle(object):
assert self.mdl, "Can't find model for {}/{}/{}".format(
tenant_id, llm_type, llm_name)
self.max_length = 8192
if llm_type == LLMType.TTS:
logging.info(f"dale--TTS model {tenant_id} {llm_type} {llm_name}")
for lm in LLMService.query(llm_name=llm_name):
self.max_length = lm.max_tokens
break
@@ -245,7 +248,7 @@ class LLMBundle(object):
"LLMBundle.transcription can't update token usage for {}/SEQUENCE2TXT used_tokens: {}".format(self.tenant_id, used_tokens))
return txt
def tts(self, text):
def tts(self, text): # tts 调用 cyx
for chunk in self.mdl.tts(text):
if isinstance(chunk,int):
if not TenantLLMService.increase_usage(
@@ -255,6 +258,9 @@ class LLMBundle(object):
return
yield chunk
def end_tts(self): # 结束 tts流式 调用 cyx
self.mdl.end_tts()
def chat(self, system, history, gen_conf):
txt, used_tokens = self.mdl.chat(system, history, gen_conf)
if isinstance(txt, int) and not TenantLLMService.increase_usage(

View File

@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import os,logging
from datetime import date
from enum import IntEnum, Enum
import rag.utils.es_conn
@@ -148,7 +148,6 @@ def init_settings():
SECRET_KEY = get_base_config(
RAG_FLOW_SERVICE_NAME,
{}).get("secret_key", str(date.today()))
global AUTHENTICATION_CONF, CLIENT_AUTHENTICATION, HTTP_APP_KEY, GITHUB_OAUTH, FEISHU_OAUTH
# authentication
AUTHENTICATION_CONF = get_base_config("authentication", {})

Binary file not shown.

Binary file not shown.

100256
cl100k_base.tiktoken Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -128,6 +128,19 @@
"max_tokens": 2048,
"model_type": "tts"
},
{
"llm_name": "sambert-zhiying-v1",
"tags": "TTS",
"max_tokens": 2048,
"model_type": "tts"
},
{
"llm_name": "cosyvoice-v1",
"tags": "TTS",
"max_tokens": 2048,
"model_type": "tts"
},
{
"llm_name": "text-embedding-v3",
"tags": "TEXT EMBEDDING,8K",

BIN
deepdoc/data/doc_test.pdf Normal file

Binary file not shown.

BIN
deepdoc/data/xls_test.pdf Normal file

Binary file not shown.

BIN
deepdoc/data/xls_test.xls Normal file

Binary file not shown.

BIN
deepdoc/data/xls_test.xlsx Normal file

Binary file not shown.

Binary file not shown.

After

Width:  |  Height:  |  Size: 747 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 802 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 930 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 914 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 418 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 556 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 503 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 231 KiB

View File

@@ -10,7 +10,7 @@ done < /ragflow/conf/service_conf.yaml.template
# unset http proxy which maybe set by docker daemon
export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
/usr/sbin/nginx
# /usr/sbin/nginx # cyx
export LD_LIBRARY_PATH=/usr/lib/x86_64-linux-gnu/

16
docker/ragflow.service Normal file
View File

@@ -0,0 +1,16 @@
[Unit]
Description=Ragflow docker Service
After=docker.service
Requires=docker.service
[Service]
Type=simple
ExecStart=/usr/bin/docker compose -f docker-compose.yml up -d
Restart=on-failure
RestartSec=5s
User=ubuntu
WorkingDirectory=/home/ubuntu/ragflow/docker
[Install]
WantedBy=multi-user.target

View File

@@ -6,7 +6,8 @@ mysql:
user: '${MYSQL_USER:-root}'
password: '${MYSQL_PASSWORD:-infini_rag_flow}'
host: '${MYSQL_HOST:-mysql}'
port: 3306
# port: 3306 # cyx
port: 5455
max_connections: 100
stale_timeout: 30
minio:
@@ -14,7 +15,9 @@ minio:
password: '${MINIO_PASSWORD:-infini_rag_flow}'
host: '${MINIO_HOST:-minio}:9000'
es:
hosts: 'http://${ES_HOST:-es01}:9200'
# hosts: 'http://${ES_HOST:-es01}:9200' # cyx
hosts: 'http://${ES_HOST:-es01}:1200'
username: '${ES_USER:-elastic}'
username: '${ES_USER:-elastic}'
password: '${ELASTIC_PASSWORD:-infini_rag_flow}'
infinity:

Binary file not shown.

Binary file not shown.

54
poetry.lock generated
View File

@@ -412,19 +412,19 @@ feedparser = ">=6.0.10,<6.1.0"
requests = ">=2.32.0,<2.33.0"
[[package]]
name = "aspose-slides"
name = "Aspose.Slides"
version = "24.11.0"
description = "Aspose.Slides for Python via .NET is a presentation file formats processing library for working with Microsoft PowerPoint files without using Microsoft PowerPoint."
optional = false
python-versions = ">=3.5,<3.14"
files = [
{file = "Aspose.Slides-24.11.0-py3-none-macosx_10_14_x86_64.whl", hash = "sha256:b4819364497f9e075e00e63ee8fba8745dda4c910e199d5201e4abeebdcdec89"},
{file = "Aspose.Slides-24.11.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:bbeb5f0b14901f29f209beeac694a183f8d36c9475556ddeed3b2edb8107536a"},
{file = "Aspose.Slides-24.11.0-py3-none-manylinux1_x86_64.whl", hash = "sha256:24d6287401863de1251fb366be4845e1693ff1c70f09f04fed1e2086561401f5"},
{file = "Aspose.Slides-24.11.0-py3-none-win32.whl", hash = "sha256:9e07bcb1c6b17f01d51d9d9c88b1fbc40da580e54ccabb6373e884e64f406a8b"},
{file = "Aspose.Slides-24.11.0-py3-none-win_amd64.whl", hash = "sha256:2b249848a0432cd2746d94011fe6258038c04615ef8606ddd1bb238f5e9d4f2f"},
]
[package.source]
type = "file"
url = "Aspose.Slides-24.11.0-py3-none-manylinux1_x86_64.whl"
[[package]]
name = "async-timeout"
version = "5.0.1"
@@ -568,7 +568,7 @@ name = "bce-python-sdk"
version = "0.9.23"
description = "BCE SDK for python"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, <4"
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,<4,>=2.7"
files = [
{file = "bce_python_sdk-0.9.23-py3-none-any.whl", hash = "sha256:8debe21a040e00060f6044877d594765ed7b18bc765c6bf16b878bca864140a3"},
{file = "bce_python_sdk-0.9.23.tar.gz", hash = "sha256:19739fed5cd0725356fc5ffa2acbdd8fb23f2a81edb91db21a03174551d0cf41"},
@@ -1709,7 +1709,7 @@ name = "deprecated"
version = "1.2.15"
description = "Python @deprecated decorator to deprecate old python classes, functions or methods."
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
files = [
{file = "Deprecated-1.2.15-py2.py3-none-any.whl", hash = "sha256:353bc4a8ac4bfc96800ddab349d89c25dec1079f65fd53acdcc1e0b975b21320"},
{file = "deprecated-1.2.15.tar.gz", hash = "sha256:683e561a90de76239796e6b6feac66b99030d2dd3fcf61ef996330f14bbb9b0d"},
@@ -2040,7 +2040,7 @@ name = "fastembed"
version = "0.3.6"
description = "Fast, light, accurate library built for retrieval embedding generation"
optional = false
python-versions = ">=3.8.0,<3.13"
python-versions = "<3.13,>=3.8.0"
files = [
{file = "fastembed-0.3.6-py3-none-any.whl", hash = "sha256:2bf70edae28bb4ccd9e01617098c2075b0ba35b88025a3d22b0e1e85b2c488ce"},
{file = "fastembed-0.3.6.tar.gz", hash = "sha256:c93c8ec99b8c008c2d192d6297866b8d70ec7ac8f5696b34eb5ea91f85efd15f"},
@@ -2622,12 +2622,12 @@ files = [
google-auth = ">=2.14.1,<3.0.dev0"
googleapis-common-protos = ">=1.56.2,<2.0.dev0"
grpcio = [
{version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""},
{version = ">=1.33.2,<2.0dev", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""},
{version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""},
]
grpcio-status = [
{version = ">=1.49.1,<2.0.dev0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""},
{version = ">=1.33.2,<2.0.dev0", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""},
{version = ">=1.49.1,<2.0.dev0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""},
]
proto-plus = ">=1.22.3,<2.0.0dev"
protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0"
@@ -2963,7 +2963,7 @@ name = "graspologic"
version = "3.4.1"
description = "A set of Python modules for graph statistics"
optional = false
python-versions = ">=3.9,<3.13"
python-versions = "<3.13,>=3.9"
files = [
{file = "graspologic-3.4.1-py3-none-any.whl", hash = "sha256:c6563e087eda599bad1de831d4b7321c0daa7a82f4e85a7d7737ff67e07cdda2"},
{file = "graspologic-3.4.1.tar.gz", hash = "sha256:7561f0b852a2bccd351bff77e8db07d9892f9dfa35a420fdec01690e4fdc8075"},
@@ -3648,7 +3648,7 @@ name = "infinity-emb"
version = "0.0.66"
description = "Infinity is a high-throughput, low-latency REST API for serving text-embeddings, reranking models and clip."
optional = false
python-versions = ">=3.9,<4"
python-versions = "<4,>=3.9"
files = [
{file = "infinity_emb-0.0.66-py3-none-any.whl", hash = "sha256:1dc6ed9fa48e6cbe83650a7583dbbb4bc393900c39c326bb0aff2ddc090ac018"},
{file = "infinity_emb-0.0.66.tar.gz", hash = "sha256:9c9a361ccebf8e8f626c1f685286518d03d0c35e7d14179ae7c2500b4fc68b98"},
@@ -4096,7 +4096,7 @@ name = "litellm"
version = "1.48.0"
description = "Library to easily interface with LLM API providers"
optional = false
python-versions = ">=3.8, !=2.7.*, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*, !=3.7.*"
python-versions = "!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*,>=3.8"
files = [
{file = "litellm-1.48.0-py3-none-any.whl", hash = "sha256:7765e8a92069778f5fc66aacfabd0e2f8ec8d74fb117f5e475567d89b0d376b9"},
{file = "litellm-1.48.0.tar.gz", hash = "sha256:31a9b8a25a9daf44c24ddc08bf74298da920f2c5cea44135e5061278d0aa6fc9"},
@@ -5413,9 +5413,9 @@ files = [
[package.dependencies]
numpy = [
{version = ">=1.23.5", markers = "python_version >= \"3.11\" and python_version < \"3.12\""},
{version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\" and python_version < \"3.11\""},
{version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\" and python_version < \"3.11\""},
{version = ">=1.23.5", markers = "python_version >= \"3.11\" and python_version < \"3.12\""},
{version = ">=1.26.0", markers = "python_version >= \"3.12\""},
]
@@ -5437,9 +5437,9 @@ files = [
[package.dependencies]
numpy = [
{version = ">=1.23.5", markers = "python_version >= \"3.11\" and python_version < \"3.12\""},
{version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\" and python_version < \"3.11\""},
{version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\" and python_version < \"3.11\""},
{version = ">=1.23.5", markers = "python_version >= \"3.11\" and python_version < \"3.12\""},
{version = ">=1.26.0", markers = "python_version >= \"3.12\""},
]
@@ -5654,8 +5654,8 @@ files = [
[package.dependencies]
numpy = [
{version = ">=1.23.2", markers = "python_version == \"3.11\""},
{version = ">=1.22.4", markers = "python_version < \"3.11\""},
{version = ">=1.23.2", markers = "python_version == \"3.11\""},
{version = ">=1.26.0", markers = "python_version >= \"3.12\""},
]
python-dateutil = ">=2.8.2"
@@ -6289,7 +6289,7 @@ name = "psutil"
version = "6.1.0"
description = "Cross-platform lib for process and system monitoring in Python."
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
files = [
{file = "psutil-6.1.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ff34df86226c0227c52f38b919213157588a678d049688eded74c76c8ba4a5d0"},
{file = "psutil-6.1.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:c0e0c00aa18ca2d3b2b991643b799a15fc8f0563d2ebb6040f64ce8dc027b942"},
@@ -6311,8 +6311,8 @@ files = [
]
[package.extras]
dev = ["black", "check-manifest", "coverage", "packaging", "pylint", "pyperf", "pypinfo", "pytest-cov", "requests", "rstcheck", "ruff", "sphinx", "sphinx-rtd-theme", "toml-sort", "twine", "virtualenv", "wheel"]
test = ["enum34", "futures", "ipaddress", "mock (==1.0.1)", "pytest (==4.6.11)", "pytest-xdist", "setuptools", "unittest2"]
dev = ["black", "check-manifest", "coverage", "packaging", "pylint", "pyperf", "pypinfo", "pytest-cov", "requests", "rstcheck", "ruff", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "virtualenv", "wheel"]
test = ["pytest", "pytest-xdist", "setuptools"]
[[package]]
name = "psycopg2-binary"
@@ -7803,30 +7803,40 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:a606ef75a60ecf3d924613892cc603b154178ee25abb3055db5062da811fd969"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd5415dded15c3822597455bc02bcd66e81ef8b7a48cb71a33628fc9fdde39df"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f66efbc1caa63c088dead1c4170d148eabc9b80d95fb75b6c92ac0aad2437d76"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:22353049ba4181685023b25b5b51a574bce33e7f51c759371a7422dcae5402a6"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:932205970b9f9991b34f55136be327501903f7c66830e9760a8ffb15b07f05cd"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-win32.whl", hash = "sha256:3eac5a91891ceb88138c113f9db04f3cebdae277f5d44eaa3651a4f573e6a5da"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-win_amd64.whl", hash = "sha256:ab007f2f5a87bd08ab1499bdf96f3d5c6ad4dcfa364884cb4549aa0154b13a28"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:4a6679521a58256a90b0d89e03992c15144c5f3858f40d7c18886023d7943db6"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:d84318609196d6bd6da0edfa25cedfbabd8dbde5140a0a23af29ad4b8f91fb1e"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb43a269eb827806502c7c8efb7ae7e9e9d0573257a46e8e952f4d4caba4f31e"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:811ea1594b8a0fb466172c384267a4e5e367298af6b228931f273b111f17ef52"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cf12567a7b565cbf65d438dec6cfbe2917d3c1bdddfce84a9930b7d35ea59642"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7dd5adc8b930b12c8fc5b99e2d535a09889941aa0d0bd06f4749e9a9397c71d2"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-win32.whl", hash = "sha256:bd0a08f0bab19093c54e18a14a10b4322e1eacc5217056f3c063bd2f59853ce4"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-win_amd64.whl", hash = "sha256:a274fb2cb086c7a3dea4322ec27f4cb5cc4b6298adb583ab0e211a4682f241eb"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:20b0f8dc160ba83b6dcc0e256846e1a02d044e13f7ea74a3d1d56ede4e48c632"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:943f32bc9dedb3abff9879edc134901df92cfce2c3d5c9348f172f62eb2d771d"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95c3829bb364fdb8e0332c9931ecf57d9be3519241323c5274bd82f709cebc0c"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:749c16fcc4a2b09f28843cda5a193e0283e47454b63ec4b81eaa2242f50e4ccd"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bf165fef1f223beae7333275156ab2022cffe255dcc51c27f066b4370da81e31"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:32621c177bbf782ca5a18ba4d7af0f1082a3f6e517ac2a18b3974d4edf349680"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-win32.whl", hash = "sha256:e8c4ebfcfd57177b572e2040777b8abc537cdef58a2120e830124946aa9b42c5"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-win_amd64.whl", hash = "sha256:0467c5965282c62203273b838ae77c0d29d7638c8a4e3a1c8bdd3602c10904e4"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:4c8c5d82f50bb53986a5e02d1b3092b03622c02c2eb78e29bec33fd9593bae1a"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux2014_aarch64.whl", hash = "sha256:e7e3736715fbf53e9be2a79eb4db68e4ed857017344d697e8b9749444ae57475"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b7e75b4965e1d4690e93021adfcecccbca7d61c7bddd8e22406ef2ff20d74ef"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96777d473c05ee3e5e3c3e999f5d23c6f4ec5b0c38c098b3a5229085f74236c6"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:3bc2a80e6420ca8b7d3590791e2dfc709c88ab9152c00eeb511c9875ce5778bf"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e188d2699864c11c36cdfdada94d781fd5d6b0071cd9c427bceb08ad3d7c70e1"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-win32.whl", hash = "sha256:6442cb36270b3afb1b4951f060eccca1ce49f3d087ca1ca4563a6eb479cb3de6"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-win_amd64.whl", hash = "sha256:e5b8daf27af0b90da7bb903a876477a9e6d7270be6146906b276605997c7e9a3"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:fc4b630cd3fa2cf7fce38afa91d7cfe844a9f75d7f0f36393fa98815e911d987"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:bc5f1e1c28e966d61d2519f2a3d451ba989f9ea0f2307de7bc45baa526de9e45"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a0e060aace4c24dcaf71023bbd7d42674e3b230f7e7b97317baf1e953e5b519"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2f1c3765db32be59d18ab3953f43ab62a761327aafc1594a2a1fbe038b8b8a7"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d85252669dc32f98ebcd5d36768f5d4faeaeaa2d655ac0473be490ecdae3c285"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e143ada795c341b56de9418c58d028989093ee611aa27ffb9b7f609c00d813ed"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-win32.whl", hash = "sha256:beffaed67936fbbeffd10966a4eb53c402fafd3d6833770516bf7314bc6ffa12"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-win_amd64.whl", hash = "sha256:040ae85536960525ea62868b642bdb0c2cc6021c9f9d507810c0c604e66f5a7b"},
{file = "ruamel.yaml.clib-0.2.12.tar.gz", hash = "sha256:6c8fbb13ec503f99a91901ab46e0b07ae7941cd527393187039aec586fdfd36f"},
@@ -7837,7 +7847,7 @@ name = "s3transfer"
version = "0.10.4"
description = "An Amazon S3 Transfer Manager"
optional = false
python-versions = ">= 3.8"
python-versions = ">=3.8"
files = [
{file = "s3transfer-0.10.4-py3-none-any.whl", hash = "sha256:244a76a24355363a68164241438de1b72f8781664920260c48465896b712a41e"},
{file = "s3transfer-0.10.4.tar.gz", hash = "sha256:29edc09801743c21eb5ecbc617a152df41d3c287f67b615f73e5f750583666a7"},
@@ -8299,7 +8309,7 @@ name = "smart-open"
version = "7.0.5"
description = "Utils for streaming large files (S3, HDFS, GCS, Azure Blob Storage, gzip, bz2...)"
optional = false
python-versions = ">=3.7,<4.0"
python-versions = "<4.0,>=3.7"
files = [
{file = "smart_open-7.0.5-py3-none-any.whl", hash = "sha256:8523ed805c12dff3eaa50e9c903a6cb0ae78800626631c5fe7ea073439847b89"},
{file = "smart_open-7.0.5.tar.gz", hash = "sha256:d3672003b1dbc85e2013e4983b88eb9a5ccfd389b0d4e5015f39a9ee5620ec18"},
@@ -10079,4 +10089,4 @@ cffi = ["cffi (>=1.11)"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.10,<3.13"
content-hash = "393f51d55da83dc829e387a5f08087a2e90a40ea63dd034586b7717143a115ca"
content-hash = "edade7b01ef0deb66e858cc688ed5b69bdd96be2e8484dc888eb7eb1ba5c6ea1"

View File

@@ -16,7 +16,7 @@ azure-identity = "1.17.1"
azure-storage-file-datalake = "12.16.0"
anthropic = "=0.34.1"
arxiv = "2.1.3"
aspose-slides = { version = "^24.9.0", markers = "platform_machine == 'x86_64' or (sys_platform == 'darwin' and platform_machine == 'arm64') " }
aspose-slides = {path = "Aspose.Slides-24.11.0-py3-none-manylinux1_x86_64.whl"}
beartype = "^0.18.5"
bio = "1.7.1"
boto3 = "1.34.140"

View File

@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import re
from openai.lib.azure import AzureOpenAI
@@ -321,6 +322,7 @@ class ZhipuChat(Base):
if "frequency_penalty" in gen_conf: del gen_conf["frequency_penalty"]
ans = ""
tk_count = 0
# logging.info(f"chat_streamly messages={history}") # cyx
try:
response = self.client.chat.completions.create(
model=self.model_name,

View File

@@ -70,6 +70,8 @@ class Base(ABC):
def tts(self, audio):
pass
def end_tts(self):
pass
def normalize_text(self, text):
return re.sub(r'(\*\*|##\d+\$\$|#)', '', text)
@@ -118,15 +120,30 @@ class FishAudioTTS(Base):
class QwenTTS(Base):
def __init__(self, key, model_name, base_url=""):
import dashscope
print("---begin--init QwenTTS--") # cyx
self.model_name = model_name
dashscope.api_key = key
self.synthesizer = None
self.callback = None
self.is_cosyvoice = False
self.voice = ""
if '/' in model_name:
parts = model_name.split('/', 1)
# 返回分离后的两个字符串parts[0], parts[1]
if parts[0] == 'cosyvoice-v1':
self.is_cosyvoice = True
self.voice = parts[1]
def tts(self, text):
from dashscope.api_entities.dashscope_response import SpeechSynthesisResponse
from dashscope.audio.tts import ResultCallback, SpeechSynthesizer, SpeechSynthesisResult
from collections import deque
if self.is_cosyvoice is False:
from dashscope.audio.tts import ResultCallback, SpeechSynthesizer, SpeechSynthesisResult
from collections import deque
else:
# cyx 2025 01 19 测试cosyvoice 使用tts_v2 版本
from dashscope.audio.tts_v2 import ResultCallback, SpeechSynthesizer, AudioFormat #, SpeechSynthesisResult
from dashscope.audio.tts import SpeechSynthesisResult
from collections import deque
# print(f"--QwenTTS--tts_stream begin-- {text}") # cyx
class Callback(ResultCallback):
def __init__(self) -> None:
self.dque = deque()
@@ -149,6 +166,7 @@ class QwenTTS(Base):
self.dque.append(None)
def on_error(self, response: SpeechSynthesisResponse):
print("Qwen tts error", str(response))
raise RuntimeError(str(response))
def on_close(self):
@@ -158,20 +176,91 @@ class QwenTTS(Base):
if result.get_audio_frame() is not None:
self.dque.append(result.get_audio_frame())
# --------------------------
class Callback_v2(ResultCallback):
def __init__(self) -> None:
self.dque = deque()
def _run(self):
while True:
if not self.dque:
time.sleep(0)
continue
val = self.dque.popleft()
if val:
yield val
else:
break
def on_open(self):
pass
def on_complete(self):
self.dque.append(None)
def on_error(self, response: SpeechSynthesisResponse):
print("Qwen tts error", str(response))
raise RuntimeError(str(response))
def on_close(self):
print("---Qwen call back close") # cyx
pass
""" canceled for test 语音大模型CosyVoice
def on_event(self, result: SpeechSynthesisResult):
if result.get_audio_frame() is not None:
self.dque.append(result.get_audio_frame())
"""
def on_event(self, message):
# print(f"recv speech synthsis message {message}")
pass
# 以下适合语音大模型CosyVoice
def on_data(self, data: bytes) -> None:
if len(data) > 0:
self.dque.append(data)
# --------------------------
text = self.normalize_text(text)
callback = Callback()
SpeechSynthesizer.call(model=self.model_name,
text=text,
callback=callback,
format="mp3")
try:
for data in callback._run():
#if self.model_name != 'cosyvoice-v1':
if self.is_cosyvoice is False:
self.callback = Callback()
SpeechSynthesizer.call(model=self.model_name,
text=text,
callback=self.callback,
format="mp3")
else:
self.callback = Callback_v2()
self.synthesizer = SpeechSynthesizer(
model='cosyvoice-v1',
# voice="longyuan", #"longfei",
voice = self.voice,
callback=self.callback,
format=AudioFormat.MP3_44100HZ_MONO_256KBPS
)
self.synthesizer.call(text)
except Exception as e:
print(f"---dale---20 error {e}") # cyx
# -----------------------------------
try:
for data in self.callback._run():
yield data
print(f"---Qwen return data {num_tokens_from_string(text)}")
yield num_tokens_from_string(text)
except Exception as e:
raise RuntimeError(f"**ERROR**: {e}")
raise RuntimeError(f"**ERROR**: {e}")
def end_tts(self):
if self.synthesizer:
self.synthesizer.streaming_complete()
class OpenAITTS(Base):
def __init__(self, key, model_name="tts-1", base_url="https://api.openai.com/v1"):

Binary file not shown.

View File

@@ -0,0 +1 @@
8fa69132f47013fde76d4d5c8044748d

View File

@@ -1,2 +0,0 @@
engine-strict=true
registry=https://registry.npmmirror.com/

View File

@@ -34,7 +34,7 @@ export default defineConfig({
proxy: [
{
context: ['/api', '/v1'],
target: 'http://127.0.0.1:9456/',
target: 'http://127.0.0.1:9380', //'http://127.0.0.1:9456/',
changeOrigin: true,
ws: true,
logger: console,

29
web/config/config.ts Normal file
View File

@@ -0,0 +1,29 @@
import { defineConfig } from 'umi';
import { BundleAnalyzerPlugin } from 'webpack-bundle-analyzer';
export default defineConfig({
chainWebpack(config) {
config.plugin('bundle-analyzer').use(BundleAnalyzerPlugin, [
{
analyzerMode: 'static', // 输出静态 HTML 文件
reportFilename: 'bundle-report.html', // 报告文件名
openAnalyzer: false, // 不自动打开浏览器
},
]);
},
mfsu: {
strategy: 'eager', // 优化模块共享机制
},
extraBabelPlugins: [
[
'import',
{
libraryName: 'antd',
libraryDirectory: 'es',
style: true,
},
],
],
});

17441
web/package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -3,7 +3,7 @@
"author": "bill",
"scripts": {
"build": "umi build",
"dev": "cross-env UMI_DEV_SERVER_COMPRESS=none umi dev",
"dev": "cross-env UMI_DEV_SERVER_COMPRESS=gzip umi dev",
"postinstall": "umi setup",
"lint": "umi lint --eslint-only",
"prepare": "cd .. && husky web/.husky",
@@ -44,7 +44,7 @@
"@uiw/react-markdown-preview": "^5.1.3",
"ahooks": "^3.7.10",
"antd": "^5.12.7",
"axios": "^1.6.3",
"axios": "^1.7.8",
"class-variance-authority": "^0.7.0",
"classnames": "^2.5.1",
"clsx": "^2.1.1",
@@ -79,6 +79,7 @@
"remark-gfm": "^4.0.0",
"tailwind-merge": "^2.5.4",
"tailwindcss-animate": "^1.0.7",
"three-bmfont-text": "^3.0.1",
"umi": "^4.0.90",
"umi-request": "^1.4.0",
"unist-util-visit-parents": "^6.0.1",
@@ -103,6 +104,7 @@
"@types/webpack-env": "^1.18.4",
"@umijs/lint": "^4.1.1",
"@umijs/plugins": "^4.1.0",
"babel-plugin-import": "^1.13.8",
"cross-env": "^7.0.3",
"html-loader": "^5.1.0",
"husky": "^9.0.11",
@@ -117,7 +119,8 @@
"tailwindcss": "^3",
"ts-node": "^10.9.2",
"typescript": "^5.0.3",
"umi-plugin-icons": "^0.1.1"
"umi-plugin-icons": "^0.1.1",
"webpack-bundle-analyzer": "^4.10.2"
},
"engines": {
"node": ">=18.20.4"

View File

@@ -19,7 +19,7 @@ const ChatApiKeyModal = ({
const columns: TableProps<IToken>['columns'] = [
{
title: 'Token',
title: 'Token-dale',
dataIndex: 'token',
key: 'token',
render: (text) => <a>{text}</a>,

View File

@@ -4,3 +4,14 @@
.variableSlider {
width: 100%;
}
.modelOptionRow {
padding: 0;
margin: 2px !important ;
}
.modelOptionCol {
padding: 0;
margin: 0px !important;
}
.formItemSmallMargin {
margin: 2px;
}

View File

@@ -3,7 +3,17 @@ import {
ModelVariableType,
settledModelVariableMap,
} from '@/constants/knowledge';
import { Divider, Flex, Form, InputNumber, Select, Slider, Switch } from 'antd';
import {
Col,
Divider,
Flex,
Form,
InputNumber,
Row,
Select,
Slider,
Switch,
} from 'antd';
import camelCase from 'lodash/camelCase';
import { useTranslate } from '@/hooks/common-hooks';
@@ -16,7 +26,7 @@ interface IProps {
formItemLayout?: any;
handleParametersChange?(value: ModelVariableType): void;
}
//对话中的模型设置 使用此组件 cyx
const LlmSettingItems = ({ prefix, formItemLayout = {} }: IProps) => {
const form = Form.useFormInstance();
const { t } = useTranslate('chat');
@@ -44,18 +54,36 @@ const LlmSettingItems = ({ prefix, formItemLayout = {} }: IProps) => {
LlmModelType.Image2text,
]);
const ttsModelOptions = useComposeLlmOptionsByModelTypes([LlmModelType.TTS]);
return (
<>
<Form.Item
label={t('model')}
name="llm_id"
tooltip={t('modelTip')}
{...formItemLayout}
rules={[{ required: true, message: t('modelMessage') }]}
>
<Select options={modelOptions} showSearch />
</Form.Item>
<Divider></Divider>
<Row align={'middle'} justify="end" className={styles.modelOptionRow}>
<Col span={12} className={styles.modelOptionCol}>
<Form.Item
className={styles.formItemSmallMargin}
label={t('model')}
name="llm_id"
tooltip={t('modelTip')}
{...formItemLayout}
rules={[{ required: true, message: t('modelMessage') }]}
>
<Select options={modelOptions} showSearch />
</Form.Item>
</Col>
<Col span={12} className={styles.modelOptionCol}>
<Form.Item
className={styles.formItemSmallMargin}
label={t('ttsModel')}
name="tts_id"
tooltip={t('ttsModelTip')}
>
<Select options={ttsModelOptions} allowClear />
</Form.Item>
</Col>
</Row>
<Divider className={styles.formItemSmallMargin}></Divider>
<Form.Item
label={t('freedom')}
name="parameter"

View File

@@ -97,6 +97,7 @@ export const useSpeech = (content: string, audioBinary?: string) => {
useEffect(() => {
if (audioBinary) {
console.log('audio:', audioBinary);
const units = hexStringToUint8Array(audioBinary);
if (units) {
try {

View File

@@ -20,6 +20,8 @@ export enum ChatSearchParams {
DialogId = 'dialogId',
ConversationId = 'conversationId',
isNew = 'isNew',
CurrentPage = 'currentPage',
PageSize = 'pageSize',
}
export const EmptyConversationId = 'empty';

View File

@@ -21,18 +21,19 @@ import { useMutation, useQuery, useQueryClient } from '@tanstack/react-query';
import { message } from 'antd';
import dayjs, { Dayjs } from 'dayjs';
import { has, set } from 'lodash';
import { useCallback, useMemo, useState } from 'react';
import { useCallback, useEffect, useMemo, useState } from 'react';
import { history, useSearchParams } from 'umi';
//#region logic
export const useClickDialogCard = () => {
const [_, setSearchParams] = useSearchParams();
const [currentPage, setCurrentPage] = useState(1);
const [pageSize, setPageSize] = useState(40); // 假设每页显示40条数据
const [data, setData] = useState(null);
const newQueryParameters: URLSearchParams = useMemo(() => {
return new URLSearchParams();
}, []);
const handleClickDialog = useCallback(
(dialogId: string) => {
newQueryParameters.set(ChatSearchParams.DialogId, dialogId);
@@ -41,11 +42,38 @@ export const useClickDialogCard = () => {
// EmptyConversationId,
// );
setSearchParams(newQueryParameters);
setCurrentPage(1); // 重置到第一页
},
[newQueryParameters, setSearchParams],
);
// 处理页码变化 added by cyx
const handlePageChange = (newPage) => {
setCurrentPage(newPage);
};
useEffect(() => {
console.log('--useClickDialogCard useEffect', currentPage, pageSize);
newQueryParameters.set(
ChatSearchParams.CurrentPage,
currentPage.toString(),
);
newQueryParameters.set(ChatSearchParams.PageSize, pageSize.toString());
setSearchParams(newQueryParameters);
}, [currentPage, pageSize]);
// 处理页面大小变化 added by cyx
const handlePageSizeChange = (newPageSize) => {
setPageSize(newPageSize);
setCurrentPage(1); // 重置到第一页
};
return { handleClickDialog };
return {
handleClickDialog,
//--下面4个 202501 30 加入 by cyx
currentPage,
pageSize,
handlePageChange,
handlePageSizeChange,
//---------------------------------
};
};
export const useClickConversationCard = () => {
@@ -56,9 +84,13 @@ export const useClickConversationCard = () => {
);
const handleClickConversation = useCallback(
(conversationId: string, isNew: string) => {
(conversationId: string, isNew: string, conversationName: string) => {
newQueryParameters.set(ChatSearchParams.ConversationId, conversationId);
newQueryParameters.set(ChatSearchParams.isNew, isNew);
newQueryParameters.set(
ChatSearchParams.ConversationName,
conversationName,
);
setSearchParams(newQueryParameters);
},
[setSearchParams, newQueryParameters],
@@ -69,12 +101,17 @@ export const useClickConversationCard = () => {
export const useGetChatSearchParams = () => {
const [currentQueryParameters] = useSearchParams();
return {
dialogId: currentQueryParameters.get(ChatSearchParams.DialogId) || '',
currentPage:
Number(currentQueryParameters.get(ChatSearchParams.CurrentPage)) || 1,
pageSize:
Number(currentQueryParameters.get(ChatSearchParams.PageSize)) || 40,
conversationId:
currentQueryParameters.get(ChatSearchParams.ConversationId) || '',
isNew: currentQueryParameters.get(ChatSearchParams.isNew) || '',
conversationName:
currentQueryParameters.get(ChatSearchParams.ConversationName) || '',
};
};
@@ -216,29 +253,43 @@ export const useRemoveNextDialog = () => {
//#region conversation
export const useFetchNextConversationList = () => {
const { dialogId } = useGetChatSearchParams();
export const useFetchNextConversationList = (params) => {
const {
dialogId: activeDialogId,
currentPage,
pageSize,
} = useGetChatSearchParams();
const dialogId = params?.dialogId || activeDialogId;
const { handleClickConversation } = useClickConversationCard();
const {
data,
data: queryData,
isFetching: loading,
refetch,
} = useQuery<IConversation[]>({
queryKey: ['fetchConversationList', dialogId],
initialData: [],
queryKey: ['fetchConversationList', dialogId, currentPage, pageSize],
initialData: { data: [], total: 0 },
gcTime: 0,
refetchOnWindowFocus: false,
enabled: !!dialogId,
queryFn: async () => {
const { data } = await chatService.listConversation({ dialogId });
if (data.code === 0 && data.data.length > 0) {
const params = { dialogId, currentPage, pageSize, ...params };
const { data } = await chatService.listConversation(params);
// cyx 2025 01 31 后端同时返回了数据记录总数,所以返回的数据变化
// code message data:{data: [....],total:xxxx}
/*if (data.code === 0 && data.data.length > 0) {
handleClickConversation(data.data[0].id, '');
}*/
if (data.code === 0 && data.data?.data.length > 0) {
handleClickConversation(data.data?.data[0].id, '');
}
return data?.data;
},
});
return { data, loading, refetch };
// 解构queryData以获取data和total
const { data, total } = queryData || {};
return { data, total, loading, refetch };
};
export const useFetchNextConversation = () => {

View File

@@ -30,6 +30,7 @@ export const useFetchLlmList = (
return data?.data ?? {};
},
refetchOnMount: true, // 确保每次组件挂载时都重新获取数据 //cyx 2024 12 04
});
return data;
@@ -64,7 +65,6 @@ const getLLMIconName = (fid: string, llm_name: string) => {
export const useSelectLlmOptionsByModelType = () => {
const llmInfo: IThirdOAIModelCollection = useFetchLlmList();
const groupOptionsByModelType = (modelType: LlmModelType) => {
return Object.entries(llmInfo)
.filter(([, value]) =>
@@ -115,7 +115,7 @@ export const useComposeLlmOptionsByModelTypes = (
modelTypes: LlmModelType[],
) => {
const allOptions = useSelectLlmOptionsByModelType();
console.log('llm-hooks useComposeLlmOptionsByModelTypes ', modelTypes); //cyx
return modelTypes.reduce<DefaultOptionType[]>((pre, cur) => {
const options = allOptions[cur];
options.forEach((x) => {
@@ -148,6 +148,9 @@ export const useFetchLlmFactoryList = (): ResponseGetType<IFactory[]> => {
export type LlmItem = { name: string; logo: string } & IMyLlmValue;
// 后台调用
//@manager.route('/my_llms', methods=['GET'])
//@login_required
export const useFetchMyLlmList = (): ResponseGetType<
Record<string, IMyLlmValue>
> => {
@@ -169,7 +172,6 @@ export const useSelectLlmList = () => {
const { data: myLlmList, loading: myLlmListLoading } = useFetchMyLlmList();
const { data: factoryList, loading: factoryListLoading } =
useFetchLlmFactoryList();
const nextMyLlmList: Array<LlmItem> = useMemo(() => {
return Object.entries(myLlmList).map(([key, value]) => ({
name: key,
@@ -263,6 +265,7 @@ export const useAddLlm = () => {
mutationKey: ['addLlm'],
mutationFn: async (params: IAddLlmRequestBody) => {
const { data } = await userService.add_llm(params);
console.log('useAddllm', params, data); //cyx
if (data.code === 0) {
queryClient.invalidateQueries({ queryKey: ['myLlmList'] });
queryClient.invalidateQueries({ queryKey: ['factoryList'] });

View File

@@ -221,7 +221,9 @@ export const useSendMessageWithSse = (
console.info('done?');
setDone(true);
resetAnswer();
return { data: await res, response };
let return_data = await res;
console.inf('useSendMessageWithSse--', return_data); //cyx
return { data: return_data, response };
} catch (e) {
setDone(true);
resetAnswer();

View File

@@ -33,6 +33,7 @@ export default {
pleaseSelect: '请选择',
pleaseInput: '请输入',
submit: '提交',
session_id: 'session_id',
},
login: {
login: '登录',
@@ -400,6 +401,7 @@ export default {
selfRag: 'Self-RAG',
selfRagTip: '请参考: https://huggingface.co/papers/2310.11511',
overview: '聊天 ID',
session_id: '对话 ID',
pv: '消息数',
uv: '活跃用户数',
speed: 'Token 输出速度',
@@ -438,6 +440,8 @@ export default {
'在多轮对话的中对去知识库查询的问题进行优化。会调用大模型额外消耗token。',
howUseId: '如何使用聊天ID',
description: '助理描述',
ttsModel: 'TTS模型',
ttsModelTip: '默认的tts模型会被用于在对话过程中请求语音生成时使用',
},
setting: {
profile: '概要',

View File

@@ -5,7 +5,9 @@ import {
settledModelVariableMap,
} from '@/constants/knowledge';
import { useTranslate } from '@/hooks/common-hooks';
import { useSelectLlmOptionsByModelType } from '@/hooks/llm-hooks';
import { useFetchModelId } from '@/hooks/logic-hooks';
import { useFetchTenantInfo } from '@/hooks/user-setting-hooks';
import { IDialog } from '@/interfaces/database/chat';
import { getBase64FromUploadFileList } from '@/utils/file-util';
import { removeUselessFieldsFromValues } from '@/utils/form';
@@ -15,11 +17,10 @@ import camelCase from 'lodash/camelCase';
import { useEffect, useRef, useState } from 'react';
import { IPromptConfigParameters } from '../interface';
import AssistantSetting from './assistant-setting';
import styles from './index.less';
import ModelSetting from './model-setting';
import PromptEngine from './prompt-engine';
import styles from './index.less';
const layout = {
labelCol: { span: 9 },
wrapperCol: { span: 15 },
@@ -55,6 +56,13 @@ interface IProps extends IModalManagerChildrenProps {
clearDialog: () => void;
}
export const useFetchSystemModelSettingOnMount = () => {
const { data: systemSetting } = useFetchTenantInfo();
const allOptions = useSelectLlmOptionsByModelType();
return { systemSetting, allOptions };
};
const ChatConfigurationModal = ({
visible,
hideModal,
@@ -64,7 +72,9 @@ const ChatConfigurationModal = ({
clearDialog,
}: IProps) => {
const [form] = Form.useForm();
//added by cyx allOptions 表示所有的模型列表
const { systemSetting: initialValues, allOptions } =
useFetchSystemModelSettingOnMount();
const [value, setValue] = useState<ConfigurationSegmented>(
ConfigurationSegmented.AssistantSetting,
);

View File

@@ -207,7 +207,11 @@ export const useSelectDerivedConversationList = () => {
const [list, setList] = useState<Array<IConversation>>([]);
const { data: currentDialog } = useFetchNextDialog();
const { data: conversationList, loading } = useFetchNextConversationList();
const {
data: conversationList,
total: conversationTotal,
loading,
} = useFetchNextConversationList();
const { dialogId } = useGetChatSearchParams();
const prologue = currentDialog?.prompt_config?.prologue ?? '';
const { setNewConversationRouteParams } = useSetNewConversationRouteParams();
@@ -244,8 +248,8 @@ export const useSelectDerivedConversationList = () => {
useEffect(() => {
setList([...conversationList]);
}, [conversationList]);
return { list, addTemporaryConversation, loading };
//console.log("--useSelectDerivedConversationList return total=",total)
return { list, conversationTotal, addTemporaryConversation, loading };
};
export const useSetConversation = () => {
@@ -474,6 +478,12 @@ export const useSendNextMessage = (controller: AbortController) => {
role: MessageType.User,
doc_ids: documentIds,
});
console.log(
'handlePressEnter--dale',
id,
value.trim(),
MessageType.User,
); //cyx
}
},
[addNewestQuestion, handleSendMessage, done, setValue, value],

View File

@@ -29,8 +29,9 @@
}
}
.chatTitleWrapper {
width: 220px;
padding: 26px 0;
width: 320px;
padding: 2px 0;
overflow-y: auto;
}
.chatTitle {
@@ -39,14 +40,15 @@
.chatTitleContent {
padding: 5px 10px;
overflow: auto;
overflow-y: auto;
height: calc(100vh - 140px); /* 视口高度减去上方元素的高度 */
}
.chatSpin {
chatTitleContent .chatSpin {
:global(.ant-spin-container) {
display: flex;
flex-direction: column;
gap: 10px;
overflow-y: auto;
}
}

View File

@@ -9,15 +9,17 @@ import {
Dropdown,
Flex,
MenuProps,
Pagination,
Space,
Spin,
Splitter,
Tag,
Tooltip,
Typography,
} from 'antd';
import { MenuItemProps } from 'antd/lib/menu/MenuItem';
import classNames from 'classnames';
import { useCallback, useState } from 'react';
import { useCallback, useEffect, useRef, useState } from 'react';
import ChatConfigurationModal from './chat-configuration-modal';
import ChatContainer from './chat-container';
import {
@@ -41,6 +43,7 @@ import { useSetSelectedRecord } from '@/hooks/logic-hooks';
import { IDialog } from '@/interfaces/database/chat';
import ChatIdModal from './chat-id-modal';
import styles from './index.less';
import SessionIdModal from './session-id-modal';
const { Text } = Typography;
@@ -48,11 +51,43 @@ const Chat = () => {
const { data: dialogList, loading: dialogLoading } = useFetchNextDialogList();
const { onRemoveDialog } = useDeleteDialog();
const { onRemoveConversation } = useDeleteConversation();
const { handleClickDialog } = useClickDialogCard();
const { handleClickDialog, handlePageChange: setConversationListPage } =
useClickDialogCard();
const { handleClickConversation } = useClickConversationCard();
const { dialogId, conversationId } = useGetChatSearchParams();
const { dialogId, conversationId, conversationName } =
useGetChatSearchParams();
//----------------------------------------------------added by cyx
const chatTitleRef = useRef(null); //added by cyx
const handleScroll = (event) => {
const { currentTarget } = event;
if (
currentTarget.scrollHeight - currentTarget.scrollTop ===
currentTarget.clientHeight
) {
// 滚动到了最底端
console.log('Reached the bottom!');
// 在这里执行您需要的操作
}
};
useEffect(() => {
const scrollContainer = chatTitleRef.current;
if (scrollContainer) {
scrollContainer.addEventListener('scroll', handleScroll);
}
// 组件卸载时移除事件监听器
return () => {
if (scrollContainer) {
scrollContainer.removeEventListener('scroll', handleScroll);
}
};
}, []);
//--------------------------------------------------
const {
list: conversationList,
conversationTotal,
addTemporaryConversation,
loading: conversationLoading,
} = useSelectDerivedConversationList();
@@ -85,6 +120,12 @@ const Chat = () => {
hideModal: hideOverviewModal,
showModal: showOverviewModal,
} = useSetModalState();
const {
visible: sessionIdModalVisible,
hideModal: hideSessionIdModal,
showModal: showSessionIdModal,
} = useSetModalState();
const { currentRecord, setRecord } = useSetSelectedRecord<IDialog>();
const [controller, setController] = useState(new AbortController());
@@ -137,6 +178,15 @@ const Chat = () => {
showConversationRenameModal(conversationId);
};
const handleShowSessionModal =
(dialog: IDialog): any =>
(info: any) => {
info?.domEvent?.preventDefault();
info?.domEvent?.stopPropagation();
setRecord(dialog);
showSessionIdModal();
};
const handleDialogCardClick = useCallback(
(dialogId: string) => () => {
handleClickDialog(dialogId);
@@ -145,13 +195,18 @@ const Chat = () => {
);
const handleConversationCardClick = useCallback(
(conversationId: string, isNew: boolean) => () => {
handleClickConversation(conversationId, isNew ? 'true' : '');
setController((pre) => {
pre.abort();
return new AbortController();
});
},
(conversationId: string, isNew: boolean, conversationName: string) =>
() => {
handleClickConversation(
conversationId,
isNew ? 'true' : '',
conversationName,
);
setController((pre) => {
pre.abort();
return new AbortController();
});
},
[handleClickConversation],
);
@@ -200,7 +255,7 @@ const Chat = () => {
return appItems;
};
const buildConversationItems = (conversationId: string) => {
const buildConversationItems = (dialog, conversationId: string) => {
const appItems: MenuProps['items'] = [
{
key: '1',
@@ -223,131 +278,195 @@ const Chat = () => {
</Space>
),
},
{ type: 'divider' },
{
key: '3',
onClick: handleShowSessionModal(dialog),
label: <Space>{t('session_id', { keyPrefix: 'common' })}</Space>,
},
];
return appItems;
};
// 分页改变时的处理函数
const handlePageChange = (page, pageSize) => {
// 这里应该有逻辑来获取新页码的数据
console.log(`当前页: ${page}, 每页显示:${pageSize}`);
setCurrentPage(page);
setConversationListPage(page);
};
const [currentPage, setCurrentPage] = useState(1);
return (
<Flex className={styles.chatWrapper}>
<Flex className={styles.chatAppWrapper}>
<Flex flex={1} vertical>
<Button type="primary" onClick={handleShowChatConfigurationModal()}>
{t('createAssistant')}
</Button>
<Divider></Divider>
<Flex className={styles.chatAppContent} vertical gap={10}>
<Spin spinning={dialogLoading} wrapperClassName={styles.chatSpin}>
{dialogList.map((x) => (
<Card
key={x.id}
hoverable
className={classNames(styles.chatAppCard, {
[styles.chatAppCardSelected]: dialogId === x.id,
})}
onMouseEnter={handleAppCardEnter(x.id)}
onMouseLeave={handleItemLeave}
onClick={handleDialogCardClick(x.id)}
<Splitter>
<Splitter.Panel defaultSize="20%" min="10%" max="40%" collapsible>
<Flex className={styles.chatAppWrapper}>
<Flex flex={1} vertical>
<Button
type="primary"
onClick={handleShowChatConfigurationModal()}
>
{t('createAssistant')}
</Button>
<Divider></Divider>
<Flex className={styles.chatAppContent} vertical gap={10}>
<Spin
spinning={dialogLoading}
wrapperClassName={styles.chatSpin}
>
<Flex justify="space-between" align="center">
<Space size={15}>
<Avatar src={x.icon} shape={'square'} />
<section>
<b>
<Text
ellipsis={{ tooltip: x.name }}
style={{ width: 130 }}
>
{x.name}
</Text>
</b>
<div>{x.description}</div>
</section>
</Space>
{activated === x.id && (
<section>
<Dropdown menu={{ items: buildAppItems(x) }}>
<ChatAppCube
className={styles.cubeIcon}
></ChatAppCube>
</Dropdown>
</section>
)}
</Flex>
</Card>
))}
</Spin>
{dialogList.map((x) => (
<Card
key={x.id}
hoverable
className={classNames(styles.chatAppCard, {
[styles.chatAppCardSelected]: dialogId === x.id,
})}
onMouseEnter={handleAppCardEnter(x.id)}
onMouseLeave={handleItemLeave}
onClick={handleDialogCardClick(x.id)}
>
<Flex justify="space-between" align="center">
<Space size={15}>
<Avatar src={x.icon} shape={'square'} />
<section>
<b>
<Text
ellipsis={{ tooltip: x.name }}
style={{ width: 130 }}
>
{x.name}
</Text>
</b>
<div>{x.description}</div>
</section>
</Space>
{activated === x.id && (
<section>
<Dropdown menu={{ items: buildAppItems(x) }}>
<ChatAppCube
className={styles.cubeIcon}
></ChatAppCube>
</Dropdown>
</section>
)}
</Flex>
</Card>
))}
</Spin>
</Flex>
</Flex>
</Flex>
</Flex>
</Flex>
<Divider type={'vertical'} className={styles.divider}></Divider>
<Flex className={styles.chatTitleWrapper}>
<Flex flex={1} vertical>
<Flex
justify={'space-between'}
align="center"
className={styles.chatTitle}
>
<Space>
<b>{t('chat')}</b>
<Tag>{conversationList.length}</Tag>
</Space>
<Tooltip title={t('newChat')}>
<div>
<SvgIcon
name="plus-circle-fill"
width={20}
onClick={handleCreateTemporaryConversation}
></SvgIcon>
</div>
</Tooltip>
</Flex>
<Divider></Divider>
<Flex vertical gap={10} className={styles.chatTitleContent}>
<Spin
spinning={conversationLoading}
wrapperClassName={styles.chatSpin}
>
{conversationList.map((x) => (
<Card
key={x.id}
hoverable
onClick={handleConversationCardClick(x.id, x.is_new)}
onMouseEnter={handleConversationCardEnter(x.id)}
onMouseLeave={handleConversationItemLeave}
className={classNames(styles.chatTitleCard, {
[styles.chatTitleCardSelected]: x.id === conversationId,
})}
{/*<Divider type={'vertical'} className={styles.divider}></Divider>*/}
</Splitter.Panel>
<Splitter.Panel
defaultSize="20%"
min="10%"
max="40%"
style={{ overflow: 'hidden' }}
>
<Flex className={styles.chatTitleWrapper}>
{1 && (
<Flex flex={1} vertical>
<Flex
justify={'space-between'}
align="center"
className={styles.chatTitle}
style={{ whiteSpace: 'nowrap', margin: '0px' }}
>
<Flex justify="space-between" align="center">
<Space size={0}>
<b>{t('chat')}</b>
{/* <Tag>{conversationList.length}</Tag> */}
<Tag style={{ margin: '0 0' }}>{conversationTotal}</Tag>
{/* 分页控件 */}
<Pagination
current={currentPage}
pageSize={40}
total={conversationTotal}
onChange={handlePageChange}
simple
showSizeChanger={false} // 不显示条数选择器
pageSizeOptions={[]} // 不显示每页条数选项
style={{
display: 'inline-block',
padding: '0px',
margin: '0 0px',
}} // 根据需要调整样式
/>
</Space>
<Tooltip title={t('newChat')}>
<div>
<Text
ellipsis={{ tooltip: x.name }}
style={{ width: 150 }}
>
{x.name}
</Text>
<SvgIcon
name="plus-circle-fill"
width={20}
onClick={handleCreateTemporaryConversation}
></SvgIcon>
</div>
{conversationActivated === x.id && x.id !== '' && (
<section>
<Dropdown
menu={{ items: buildConversationItems(x.id) }}
>
<ChatAppCube
className={styles.cubeIcon}
></ChatAppCube>
</Dropdown>
</section>
)}
</Flex>
</Card>
))}
</Spin>
</Tooltip>
</Flex>
<Divider style={{ margin: '0px' }}></Divider>
<Flex
vertical
gap={2}
className={styles.chatTitleContent}
ref={chatTitleRef}
>
<Spin
spinning={conversationLoading}
wrapperClassName={styles.chatSpin}
style={{ overflowY: 'auto' }}
>
{conversationList.map((x) => (
<Card
key={x.id}
hoverable
onClick={handleConversationCardClick(
x.id,
x.is_new,
x.name,
)}
onMouseEnter={handleConversationCardEnter(x.id)}
onMouseLeave={handleConversationItemLeave}
className={classNames(styles.chatTitleCard, {
[styles.chatTitleCardSelected]:
x.id === conversationId,
})}
>
<Flex justify="space-between" align="center">
<div>
<Text
ellipsis={{ tooltip: x.name }}
style={{ width: 260 }}
>
{x.name}
</Text>
</div>
{conversationActivated === x.id && x.id !== '' && (
<section>
<Dropdown
menu={{
items: buildConversationItems(x, x.id),
}}
>
<ChatAppCube
className={styles.cubeIcon}
></ChatAppCube>
</Dropdown>
</section>
)}
</Flex>
</Card>
))}
</Spin>
</Flex>
</Flex>
)}
</Flex>
</Flex>
</Flex>
<Divider type={'vertical'} className={styles.divider}></Divider>
<ChatContainer controller={controller}></ChatContainer>
{/*<Divider type={'vertical'} className={styles.divider}></Divider>*/}
</Splitter.Panel>
<Splitter.Panel defaultSize="58%" min="20%" max="80%">
{1 && <ChatContainer controller={controller}></ChatContainer>}
</Splitter.Panel>
</Splitter>
{dialogEditVisible && (
<ChatConfigurationModal
visible={dialogEditVisible}
@@ -375,6 +494,15 @@ const Chat = () => {
idKey="dialogId"
></ChatIdModal>
)}
{sessionIdModalVisible && (
<SessionIdModal
visible={sessionIdModalVisible}
hideModal={hideSessionIdModal}
id={conversationId}
name={conversationName}
idKey="conversationId"
></SessionIdModal>
)}
</Flex>
);
};

View File

@@ -0,0 +1,3 @@
// Link-style rendering for the copyable session-id text inside SessionIdModal.
// `.linkText()` is presumably a shared mixin from the project's global less utilities — TODO confirm.
.id {
  .linkText();
}

View File

@@ -0,0 +1,33 @@
import { useTranslate } from '@/hooks/common-hooks';
import { IModalProps } from '@/interfaces/common';
import { Modal, Typography } from 'antd';
import styles from './index.less';
const { Paragraph, Link } = Typography;
/**
 * Modal that shows a conversation's session id with a one-click copy action.
 *
 * Props:
 * - id:    the conversation (session) id to display; `Paragraph copyable` copies it.
 * - name:  optional conversation name prefixed to the modal title.
 * - idKey: accepted for interface parity with ChatIdModal; intentionally unused here.
 *
 * Only an OK ("close") button is rendered — the cancel button is hidden and both
 * actions simply dismiss the modal.
 */
const SessionIdModal = ({
  visible,
  hideModal,
  id,
  name,
}: IModalProps<any> & { id: string; name?: string; idKey: string }) => {
  const { t } = useTranslate('chat');

  return (
    <Modal
      // `name` is optional: guard it so the title never renders "undefined session_id".
      title={name ? `${name} ${t('session_id')}` : t('session_id')}
      open={visible}
      onCancel={hideModal}
      cancelButtonProps={{ style: { display: 'none' } }}
      onOk={hideModal}
      okText={t('close', { keyPrefix: 'common' })}
    >
      <Paragraph copyable={{ text: id }} className={styles.id}>
        {id}
      </Paragraph>
    </Modal>
  );
};

export default SessionIdModal;

View File

@@ -139,6 +139,7 @@ const ModelCard = ({ item, clickApiKey }: IModelCardProps) => {
const UserSettingModel = () => {
const { factoryList, myLlmList: llmList, loading } = useSelectLlmList();
console.log('UserSettingModel--', llmList); //cyx
const {
saveApiKeyLoading,
initialApiKey,
@@ -265,9 +266,15 @@ const UserSettingModel = () => {
const handleAddModel = useCallback(
(llmFactory: string) => {
if (isLocalLlmFactory(llmFactory)) {
console.log('--handleAddModel', llmFactory); //cyx
showLlmAddingModal(llmFactory);
} else if (llmFactory in ModalMap) {
ModalMap[llmFactory as keyof typeof ModalMap]();
console.log(
'--handleAddModel---2',
llmFactory,
ModalMap[llmFactory as keyof typeof ModalMap],
); //cyx
} else {
showApiKeyModal({ llm_factory: llmFactory });
}
@@ -284,6 +291,7 @@ const UserSettingModel = () => {
grid={{ gutter: 16, column: 1 }}
dataSource={llmList}
renderItem={(item) => (
//已经添加的模型 cyx
<ModelCard item={item} clickApiKey={handleAddModel}></ModelCard>
)}
/>

View File

@@ -1,7 +1,7 @@
import { useTranslate } from '@/hooks/common-hooks';
import { IModalProps } from '@/interfaces/common';
import { IAddLlmRequestBody } from '@/interfaces/request/llm';
import { Flex, Form, Input, Modal, Select, Space, Switch, InputNumber } from 'antd';
import { Flex, Form, Input, InputNumber, Modal, Select, Space } from 'antd';
import omit from 'lodash/omit';
type FieldType = IAddLlmRequestBody & {
@@ -36,7 +36,7 @@ const VolcEngineModal = ({
...omit(values, ['vision']),
model_type: modelType,
llm_factory: llmFactory,
max_tokens:values.max_tokens,
max_tokens: values.max_tokens,
};
console.info(data);
@@ -81,6 +81,8 @@ const VolcEngineModal = ({
<Select placeholder={t('modelTypeMessage')}>
<Option value="chat">chat</Option>
<Option value="embedding">embedding</Option>
//added by cyx 2025-01-26
<Option value="TTS">TTS</Option>
</Select>
</Form.Item>
<Form.Item<FieldType>
@@ -128,7 +130,6 @@ const VolcEngineModal = ({
style={{ width: '100%' }}
/>
</Form.Item>
</Form>
</Modal>
);

12
web/webpack.config.js Normal file
View File

@@ -0,0 +1,12 @@
const { BundleAnalyzerPlugin } = require('webpack-bundle-analyzer');
module.exports = {
// 其他 Webpack 配置
plugins: [
new BundleAnalyzerPlugin({
analyzerMode: 'server', // 启动本地服务查看报告
openAnalyzer: true, // 自动打开浏览器
generateStatsFile: false, // 是否生成 stats.json 文件
}),
],
};