Fix frontend bugs
This commit is contained in:
parent e7b9e88c8f
commit 8d507cdcb2
@@ -83,6 +83,7 @@ import Setting from './setting.vue';
 import { llmManager, llms } from '@/views/setting/llm';
 // import helpers from markdown.ts
 import { markdownToHtml, copyToClipboard } from './markdown';
+import { TaskLoop } from './task-loop';
 
 defineComponent({ name: 'chat' });
 
@@ -189,149 +190,57 @@ watch(streamingContent, () => {
     }
 }, { deep: true });
 
+let loop: TaskLoop | undefined = undefined;
+
 const handleSend = () => {
     if (!userInput.value.trim() || isLoading.value) return;
 
     autoScroll.value = true; // restore auto-scroll when sending a new message
     const userMessage = userInput.value.trim();
-    tabStorage.messages.push({ role: 'user', content: userMessage });
-
-    // the backend accepts baseURL, apiKey, model, messages, temperature
-    const baseURL = llms[llmManager.currentModelIndex].baseUrl;
-    const apiKey = llms[llmManager.currentModelIndex].userToken;
-    const model = llms[llmManager.currentModelIndex].userModel;
-    const temperature = tabStorage.settings.temperature;
-    const tools = getToolSchema(tabStorage.settings.enableTools);
-
-    const userMessages = [];
-    if (tabStorage.settings.systemPrompt) {
-        userMessages.push({
-            role: 'system',
-            content: tabStorage.settings.systemPrompt
-        });
-    }
-    // if tabStorage.settings.contextLength is exceeded, drop the oldest messages
-    const loadMessages = tabStorage.messages.slice(-tabStorage.settings.contextLength);
-    userMessages.push(...loadMessages);
-
-    const chatData = {
-        baseURL,
-        apiKey,
-        model,
-        temperature,
-        tools,
-        messages: userMessages,
-    };
-
-    autoScroll.value = true;
     isLoading.value = true;
-    streamingContent.value = '';
-
-    const chunkHandler = bridge.addCommandListener('llm/chat/completions/chunk', data => {
-        if (data.code !== 200) {
-            handleError(data.msg || '请求模型服务时发生错误');
-            return;
-        }
-        const { chunk } = data.msg;
-        const userMessage = userInput.value.trim();
-
-        const content = chunk.choices[0]?.delta?.content || '';
-        const toolCall = chunk.choices[0]?.delta?.tool_calls?.[0];
-
-        if (content) {
-            streamingContent.value += content;
-            scrollToBottom();
-        }
-
-        if (toolCall) {
-            if (toolCall.index === 0) {
-                // a new tool call starts
-                streamingToolCalls.value = [{
-                    id: toolCall.id,
-                    name: toolCall.function?.name || '',
-                    arguments: toolCall.function?.arguments || ''
-                }];
-            } else {
-                // accumulate data for the tool call in progress
-                const currentCall = streamingToolCalls.value[toolCall.index];
-                if (currentCall) {
-                    if (toolCall.id) {
-                        currentCall.id = toolCall.id;
-                    }
-                    if (toolCall.function?.name) {
-                        currentCall.name = toolCall.function.name;
-                    }
-                    if (toolCall.function?.arguments) {
-                        currentCall.arguments += toolCall.function.arguments;
-                    }
-                }
-            }
-        }
-
-        const finishReason = chunk.choices[0]?.finish_reason;
-        if (finishReason === 'tool_calls') {
-            // tool calls are complete; they can be handled here
-            console.log('Tool calls completed:', streamingToolCalls.value);
-            streamingToolCalls.value = [];
-        }
-    }, { once: false });
-
-    bridge.addCommandListener('llm/chat/completions/done', data => {
-        if (data.code !== 200) {
-            handleError(data.msg || '模型服务处理完成但返回错误');
-            return;
-        }
-        if (streamingContent.value) {
-            // append to the message list
-            tabStorage.messages.push({
-                role: 'assistant',
-                content: streamingContent.value
-            });
-            streamingContent.value = '';
-        }
-        // tool call results also go into the message list
-        if (streamingToolCalls.value.length > 0) {
-            streamingToolCalls.value.forEach(tool => {
-                if (tool.id) {
-                    tabStorage.messages.push({
-                        role: 'tool',
-                        tool_call_id: tool.id,
-                        content: tool.arguments
-                    });
-                }
-            });
-            streamingToolCalls.value = [];
-        }
-        isLoading.value = false;
-        chunkHandler();
-    }, { once: true });
-
-    bridge.postMessage({
-        command: 'llm/chat/completions',
-        data: chatData
-    });
+
+    loop = new TaskLoop(
+        streamingContent,
+        streamingToolCalls,
+        // onerror
+        (msg) => {
+            ElMessage({
+                message: msg,
+                type: 'error',
+                duration: 3000
+            });
+
+            tabStorage.messages.push({
+                role: 'assistant',
+                content: `错误: ${msg}`
+            });
+            streamingContent.value = '';
+            isLoading.value = false;
+        },
+        // onchunk
+        (chunk) => {
+            scrollToBottom();
+        },
+        // ondone
+        () => {
+            isLoading.value = false;
+            scrollToBottom();
+
+            loop = undefined;
+        }
+    );
+
+    loop.start(tabStorage, userMessage);
     userInput.value = '';
 };
 
 const handleAbort = () => {
-    bridge.postMessage({
-        command: 'llm/chat/completions/abort', // assumes the backend exposes a matching abort command
-        data: {}
-    });
-    isLoading.value = false;
-    streamingContent.value = '';
-    ElMessage.info('请求已中止');
-};
-
-const handleError = (msg: string) => {
-    ElMessage.error(msg);
-    tabStorage.messages.push({
-        role: 'assistant',
-        content: `错误: ${msg}`
-    });
-    streamingContent.value = '';
-    isLoading.value = false;
+    if (loop) {
+        loop.abort();
+        isLoading.value = false;
+        ElMessage.info('请求已中止');
+    }
 };
 
 onMounted(() => {
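
Taken together, this hunk replaces the component-local streaming state machine with a TaskLoop instance held in a module-level `loop` variable, so `handleAbort` can reach the in-flight request. Below is a self-contained sketch of the callback contract the component now relies on; `MiniTaskLoop` is a stand-in, not the project's real class (see the task-loop.ts hunks further down), and only the callback order — onerror, onchunk, ondone, then options — mirrors the diff.

```ts
import { ref, type Ref } from 'vue';

// Stand-in for the project's ToolCall shape.
type ToolCall = { id?: string; name: string; arguments: string };

class MiniTaskLoop {
    constructor(
        private readonly streamingContent: Ref<string>,
        private readonly streamingToolCalls: Ref<ToolCall[]>,
        private readonly onError: (msg: string) => void = () => {},
        private readonly onChunk: (chunk: unknown) => void = () => {},
        private readonly onDone: () => void = () => {},
        private readonly taskOptions = { maxEpochs: 20 },
    ) {}

    async start(_tabStorage: unknown, userMessage: string) {
        // Pretend a single chunk arrived, then finish.
        this.streamingContent.value = userMessage;
        this.onChunk(userMessage);
        this.onDone();
    }

    abort() {
        // The real class also notifies the backend; see the abort() hunk below.
        this.streamingContent.value = '';
        this.streamingToolCalls.value = [];
    }
}

// Wiring, as the component does: keep the handle so an abort button can reach it.
let loop: MiniTaskLoop | undefined;
loop = new MiniTaskLoop(ref(''), ref([]),
    msg => console.error(msg),   // onerror: surface the failure
    () => {},                    // onchunk: e.g. scroll to bottom
    () => { loop = undefined; }  // ondone: release the handle
);
```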
@@ -3,8 +3,7 @@
 <el-tooltip content="选择模型" placement="top">
     <div class="setting-button" size="small" @click="showModelDialog = true">
         <span class="iconfont icon-model">
-            {{ llms[llmManager.currentModelIndex].name }}/{{
-                llms[llmManager.currentModelIndex].models[selectedModelIndex] }}
+            {{ currentServerName }}/{{ currentModelName }}
         </span>
     </div>
 </el-tooltip>
@@ -142,6 +141,22 @@ const showTemperatureSlider = ref(false);
 const showContextLengthDialog = ref(false);
 const showSystemPromptDialog = ref(false);
 
+const currentServerName = computed(() => {
+    const currentLlm = llms[llmManager.currentModelIndex];
+    if (currentLlm) {
+        return currentLlm.name;
+    }
+    return '';
+});
+
+const currentModelName = computed(() => {
+    const currentLlm = llms[llmManager.currentModelIndex];
+    if (currentLlm) {
+        return currentLlm.models[selectedModelIndex.value];
+    }
+    return '';
+});
+
 const tab = tabs.content[props.tabId];
 const tabStorage = tab.storage as ChatStorage & { settings: ChatSetting };
@@ -157,7 +172,6 @@ if (!tabStorage.settings) {
     } as ChatSetting;
 }
 
-
 const selectedModelIndex = ref(llmManager.currentModelIndex);
 
 const availableModels = computed(() => {
@@ -1,5 +1,6 @@
 /* eslint-disable */
 import { Ref } from "vue";
-import { ToolCall, ChatMessage, ChatStorage, getToolSchema } from "./chat";
+import { ToolCall, ChatStorage, getToolSchema } from "./chat";
 import { useMessageBridge } from "@/api/message-bridge";
+import type { OpenAI } from 'openai';
 import { callTool } from "../tool/tools";
@@ -16,15 +17,15 @@ interface TaskLoopOptions {
  */
 export class TaskLoop {
     private bridge = useMessageBridge();
+    private currentChatId = '';
 
     constructor(
         private readonly streamingContent: Ref<string>,
         private readonly streamingToolCalls: Ref<ToolCall[]>,
-        private readonly messages: ChatMessage[],
-        private readonly taskOptions: TaskLoopOptions = { maxEpochs: 20 },
         private readonly onError: (msg: string) => void = (msg) => {},
         private readonly onChunk: (chunk: ChatCompletionChunk) => void = (chunk) => {},
         private readonly onDone: () => void = () => {},
+        private readonly taskOptions: TaskLoopOptions = { maxEpochs: 20 },
     ) {}
 
     private async handleToolCalls(toolCalls: ToolCall[]) {
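
Moving `taskOptions` from the middle of the parameter list to the end is what lets callers pass the three callbacks positionally and still omit the options. A minimal sketch of that default-trailing-parameter pattern (class and names are illustrative, not the project's API):

```ts
// With defaults only at the tail, `new Example(cb1, cb2)` is legal;
// the old order (options before callbacks) forced callers to spell
// out the options just to reach the callbacks.
class Example {
    constructor(
        readonly onError: (msg: string) => void = () => {},
        readonly onDone: () => void = () => {},
        readonly options: { maxEpochs: number } = { maxEpochs: 20 },
    ) {}
}

new Example(msg => console.error(msg), () => {}); // options defaulted
```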
@@ -83,9 +84,8 @@ export class TaskLoop {
 
     private doConversation(chatData: ChatCompletionCreateParamsBase) {
 
-        const bridge = useMessageBridge();
         return new Promise<void>((resolve, reject) => {
-            const chunkHandler = bridge.addCommandListener('llm/chat/completions/chunk', data => {
+            const chunkHandler = this.bridge.addCommandListener('llm/chat/completions/chunk', data => {
                 if (data.code !== 200) {
                     this.onError(data.msg || '请求模型服务时发生错误');
                     reject(new Error(data.msg || '请求模型服务时发生错误'));
@@ -100,14 +100,14 @@ export class TaskLoop {
                 this.onChunk(chunk);
             }, { once: false });
 
-            bridge.addCommandListener('llm/chat/completions/done', data => {
+            this.bridge.addCommandListener('llm/chat/completions/done', data => {
                 this.onDone();
                 chunkHandler();
 
                 resolve();
             }, { once: true });
 
-            bridge.postMessage({
+            this.bridge.postMessage({
                 command: 'llm/chat/completions',
                 data: chatData
             });
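
The doConversation shape is worth pausing on: the request, the repeating chunk listener, and the one-shot done listener are wrapped in a single promise, and the value returned by addCommandListener is itself the unsubscribe function — which is why `chunkHandler()` is invoked on completion. A self-contained sketch of the same pattern, with a hypothetical `Bridge` interface standing in for what useMessageBridge() returns (only the command names come from the diff):

```ts
// Hypothetical stand-in for the message bridge; everything but the
// command strings is assumed.
interface Bridge {
    postMessage(msg: { command: string; data: unknown }): void;
    addCommandListener(
        command: string,
        handler: (data: { code: number; msg: any }) => void,
        options: { once: boolean },
    ): () => void; // the return value unsubscribes the listener
}

function requestStream(bridge: Bridge, chatData: unknown): Promise<void> {
    return new Promise<void>((resolve, reject) => {
        // Chunks arrive repeatedly, so { once: false }.
        const offChunk = bridge.addCommandListener('llm/chat/completions/chunk', data => {
            if (data.code !== 200) {
                offChunk();
                reject(new Error(String(data.msg)));
            }
            // ...accumulate data.msg.chunk here...
        }, { once: false });

        // 'done' fires exactly once; tear down the chunk listener with it.
        bridge.addCommandListener('llm/chat/completions/done', () => {
            offChunk();
            resolve();
        }, { once: true });

        bridge.postMessage({ command: 'llm/chat/completions', data: chatData });
    });
}
```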
@@ -149,14 +149,27 @@ export class TaskLoop {
         return chatData;
     }
 
+    public abort() {
+        this.bridge.postMessage({
+            command: 'llm/chat/completions/abort',
+            data: {
+                id: this.currentChatId
+            }
+        });
+        this.streamingContent.value = '';
+        this.streamingToolCalls.value = [];
+    }
+
 
     /**
      * @description Start the loop and update the DOM asynchronously.
      */
-    public async start(tabStorage: ChatStorage) {
-        // the backend accepts baseURL, apiKey, model, messages, temperature
+    public async start(tabStorage: ChatStorage, userMessage: string) {
+        // append the current user message
+        tabStorage.messages.push({ role: 'user', content: userMessage });
+
         for (let i = 0; i < this.taskOptions.maxEpochs; ++ i) {
 
             while (true) {
                 // reset the streaming accumulators
                 this.streamingContent.value = '';
                 this.streamingToolCalls.value = [];
@@ -164,6 +177,8 @@ export class TaskLoop {
             // build chatData
             const chatData = this.makeChatData(tabStorage);
 
+            this.currentChatId = chatData.id!;
+
             // send the request
             await this.doConversation(chatData);
 
@@ -185,6 +200,12 @@ export class TaskLoop {
                     role: 'assistant',
                     content: this.streamingContent.value
                 });
                 break;
+
+            } else {
+                // a user-facing hint could go here
+
+                break;
+            }
             }
         }
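
The new start signature also makes the overall control flow easier to see: each epoch performs one completions round trip; tool calls feed their results back into the history and re-enter the loop, while a plain answer is recorded and breaks out. A self-contained sketch of that loop — every name below is a hypothetical stand-in, not the project's API:

```ts
// One round trip either yields tool calls or final text.
type RoundResult =
    | { kind: 'text'; content: string }
    | { kind: 'tool_calls'; calls: { name: string; arguments: string }[] };

async function runTaskLoop(
    requestOnce: () => Promise<RoundResult>,
    runTools: (calls: { name: string; arguments: string }[]) => Promise<void>,
    onAnswer: (content: string) => void,
    maxEpochs = 20,
) {
    for (let epoch = 0; epoch < maxEpochs; ++epoch) {
        const result = await requestOnce();     // one llm/chat/completions exchange
        if (result.kind === 'tool_calls') {
            await runTools(result.calls);       // results go back into the history
            continue;                           // the next epoch re-queries the model
        }
        onAnswer(result.content);               // plain answer: record it and stop
        break;
    }
}
```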
@@ -34,7 +34,14 @@ export const tabs = reactive<{
     ],
     activeIndex: 0,
     get activeTab() {
-        return this.content[this.activeIndex];
+        return this.content[this.activeIndex] || {
+            name: 'blank',
+            icon: 'icon-blank',
+            type: 'blank',
+            component: undefined,
+            componentIndex: -1,
+            storage: {},
+        };
     }
 });
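
The activeTab getter now falls back to a blank-tab object, presumably so template bindings such as `activeTab.name` cannot hit undefined when `activeIndex` points past the array (for example after tabs are closed). A plain-object sketch of the failure the fallback avoids (`reactive()` omitted for brevity):

```ts
// Without the `||` branch, reading `name` on an out-of-range index
// throws: "Cannot read properties of undefined".
const tabs = {
    content: [{ name: 'chat' }],
    activeIndex: 3, // stale index, e.g. after closing tabs
    get activeTab() {
        return this.content[this.activeIndex] || { name: 'blank' };
    },
};
console.log(tabs.activeTab.name); // "blank" instead of a TypeError
```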
@@ -52,7 +52,7 @@ export function loadPanels() {
     });
 }
 
-let debounceHandler: number;
+let debounceHandler: NodeJS.Timeout;
 
 export function safeSavePanels() {
     clearTimeout(debounceHandler);
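
This one-line fix switches between the two typings of setTimeout: it returns `number` under DOM lib typings but `NodeJS.Timeout` under Node typings. A portable alternative is to derive the type from setTimeout itself; a sketch, where `savePanels` and the 300 ms delay are assumptions rather than the project's actual values:

```ts
// ReturnType<typeof setTimeout> compiles against either lib, so the
// debounce handle no longer depends on which environment's typings win.
let debounceHandler: ReturnType<typeof setTimeout>;

function savePanels() { /* persist panel state */ }

export function safeSavePanels() {
    clearTimeout(debounceHandler);
    debounceHandler = setTimeout(savePanels, 300); // assumed debounce delay
}
```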
@@ -16,7 +16,7 @@
 </template>
 
 <script setup lang="ts">
-import { defineComponent } from 'vue';
+import { defineComponent, computed } from 'vue';
 
 import Welcome from './welcome.vue';
 import { tabs } from '@/components/main-panel/panel';
@@ -14,7 +14,7 @@ export async function chatCompletionHandler(client: MCPClient | undefined, data:
     }
 
 
-    const { baseURL, apiKey, model, messages, temperature, tools = [] } = data;
+    let { baseURL, apiKey, model, messages, temperature, tools = [] } = data;
 
     try {
         const client = new OpenAI({
@@ -22,7 +22,9 @@ export async function chatCompletionHandler(client: MCPClient | undefined, data:
         apiKey
     });
 
-    console.log(tools);
+    if (tools.length === 0) {
+        tools = undefined;
+    }
 
 
     const stream = await client.chat.completions.create({
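
This backend change suggests the OpenAI-compatible endpoint in use rejects an empty `tools` array, so `[]` is normalized to `undefined` before the request — which is also why the `const` destructuring above became `let`. A sketch of an equivalent normalization that omits the key instead of reassigning a variable; the types are simplified stand-ins for the OpenAI ones, not the library's definitions:

```ts
type Tool = { type: 'function'; function: { name: string; description?: string } };
type ChatParams = { model: string; messages: unknown[]; temperature?: number; tools?: Tool[] };

// Drop `tools` entirely when the list is empty, so the request body
// never carries an empty array the endpoint might reject.
function normalizeParams(params: ChatParams): ChatParams {
    const { tools, ...rest } = params;
    return tools && tools.length > 0 ? { ...rest, tools } : rest;
}

// usage sketch: client.chat.completions.create({ ...normalizeParams(p), stream: true })
```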
@@ -1,5 +1,5 @@
 {
-    "currentIndex": 1,
+    "currentIndex": 0,
     "tabs": [
         {
             "name": "交互测试",
@@ -10,7 +10,19 @@
             "messages": [
                 {
                     "role": "user",
-                    "content": "test add"
+                    "content": "hello"
+                },
+                {
+                    "role": "assistant",
+                    "content": "Hello! How can I assist you today?"
+                },
+                {
+                    "role": "user",
+                    "content": "请问什么是聚类算法"
+                },
+                {
+                    "role": "assistant",
+                    "content": "聚类算法(Clustering Algorithm)是一种**无监督学习**(Unsupervised Learning)方法,用于将数据集中的样本划分为若干个组(称为“簇”或“类”),使得同一组内的样本彼此相似,而不同组的样本差异较大。聚类算法的目标是通过数据的内在结构,发现数据中的潜在模式或分组。\n\n### 聚类算法的核心思想\n1. **相似性度量**:通过距离(如欧氏距离、曼哈顿距离)或相似性(如余弦相似度)来衡量样本之间的相似程度。\n2. **簇内紧凑性**:同一簇内的样本应尽可能相似。\n3. **簇间分离性**:不同簇的样本应尽可能不相似。\n\n---\n\n### 常见的聚类算法\n1. **K-Means** \n   - 将数据划分为K个簇,每个簇的中心由簇内样本的平均值表示。\n   - 适用于数值型数据,计算高效,但对初始中心点敏感,且需要预先指定K值。\n\n2. **层次聚类(Hierarchical Clustering)** \n   - 通过自底向上(凝聚法)或自顶向下(分裂法)的方式构建树状图(Dendrogram)。\n   - 不需要预先指定簇的数量,但计算复杂度较高。\n\n3. **DBSCAN(Density-Based Spatial Clustering of Applications with Noise)** \n   - 基于密度的聚类,能够发现任意形状的簇,并能识别噪声点。\n   - 不需要预先指定簇的数量,但对参数(如邻域半径和最小样本数)敏感。\n\n4. **高斯混合模型(GMM, Gaussian Mixture Model)** \n   - 假设数据由多个高斯分布混合生成,通过概率模型进行聚类。\n   - 适用于数据分布复杂的情况,但计算复杂度较高。\n\n5. **谱聚类(Spectral Clustering)** \n   - 基于图论的聚类方法,利用数据的相似性矩阵进行降维和聚类。\n   - 适用于非凸形状的簇,但对相似性矩阵的计算要求较高。\n\n---\n\n### 聚类算法的应用场景\n1. **客户分群**:根据消费行为将用户划分为不同群体,便于精准营销。\n2. **图像分割**:将图像中的像素聚类为不同的区域。\n3. **异常检测**:通过聚类识别离群点(如金融欺诈检测)。\n4. **文本挖掘**:对文档或词汇进行聚类,发现主题或类别。\n5. **生物信息学**:基因表达数据的聚类分析。\n\n---\n\n### 聚类算法的评估指标\n1. **轮廓系数(Silhouette Coefficient)**:衡量样本与同簇和其他簇的相似性,值越接近1表示聚类效果越好。\n2. **Calinski-Harabasz指数**:基于簇内离散度和簇间离散度的比值,值越大表示聚类效果越好。\n3. **Davies-Bouldin指数**:衡量簇内距离与簇间距离的比值,值越小表示聚类效果越好。\n\n---\n\n如果你有具体的数据或场景需要聚类分析,可以告诉我,我可以帮助你选择合适的算法或工具!"
                 }
             ],
             "settings": {
@@ -43,24 +55,6 @@
                 "systemPrompt": ""
             }
         }
-        },
-        {
-            "name": "工具",
-            "icon": "icon-tool",
-            "type": "blank",
-            "componentIndex": 2,
-            "storage": {
-                "lastToolCallResponse": {
-                    "content": [
-                        {
-                            "type": "text",
-                            "text": "4"
-                        }
-                    ],
-                    "isError": false
-                },
-                "currentToolName": "add"
-            }
-        }
         }
     ]
 }