Complete input transmission to the LLM
This commit is contained in:
parent 3ac2af597c
commit 7330889290
renderer/package-lock.json (generated, 4 lines changed)
@@ -1,11 +1,11 @@
 {
-  "name": "app",
+  "name": "renderer",
   "version": "0.1.0",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
-      "name": "app",
+      "name": "renderer",
       "version": "0.1.0",
       "dependencies": {
         "core-js": "^3.8.3",
@@ -39,7 +39,7 @@ onMounted(() => {
 
     // in debug mode, connect directly to the server in this project
     if (acquireVsCodeApi === undefined) {
-        connectionArgs.commandString = 'uv run mcp run ../servers/main.py';
+        connectionArgs.commandString = 'mcp run ../servers/main.py';
         connectionMethods.current = 'STDIO';
 
     bridge.addCommandListener('connect', data => {
@@ -28,13 +28,13 @@
 
         <el-footer class="chat-footer" ref="footerRef">
             <div class="input-area">
 
-                <Setting :tabId="tabId" />
                 <div class="input-wrapper">
+                    <Setting :tabId="tabId" />
                     <el-input v-model="userInput" type="textarea" :rows="inputHeightLines" :maxlength="2000"
-                        placeholder="输入消息..." :disabled="isLoading" @keydown.enter="handleKeydown" resize="none"
+                        placeholder="输入消息..." @keydown.enter="handleKeydown" resize="none"
                         class="chat-input" />
 
                     <el-button type="primary" :loading="isLoading" @click="handleSend" class="send-button"
                         :disabled="!userInput.trim()">
                         <span class="iconfont icon-send"></span>
@@ -48,14 +48,14 @@
 <script setup lang="ts">
 import { ref, onMounted, defineComponent, defineProps, onUnmounted, computed } from 'vue';
 import { useI18n } from 'vue-i18n';
-import { User, Comment } from '@element-plus/icons-vue';
+import { Comment } from '@element-plus/icons-vue';
 import { useMessageBridge } from "@/api/message-bridge";
 import { ElMessage } from 'element-plus';
-import { llmManager, llms } from '@/views/setting/llm';
 import { tabs } from '../panel';
 import { ChatMessage, ChatStorage } from './chat';
 
 import Setting from './setting.vue';
+import { llmManager, llms } from '@/views/setting/llm';
 
 defineComponent({ name: 'chat' });
 
@@ -71,55 +71,19 @@ const props = defineProps({
 const tab = tabs.content[props.tabId];
 const tabStorage = tab.storage as ChatStorage;
 
 
 const bridge = useMessageBridge();
 const userInput = ref('');
 const inputHeightLines = computed(() => {
     const currentLines = userInput.value.split('\n').length;
     return Math.min(12, Math.max(5, currentLines));
 });
-const messages = ref<ChatMessage[]>([
-    {
-        role: 'assistant',
-        content: '你好!我是AI助手,有什么可以帮您的吗?'
-    },
-    {
-        role: 'user',
-        content: '你好,能帮我写一封求职信吗?'
-    },
-    {
-        role: 'assistant',
-        content: '当然可以。请问您应聘的是什么职位?需要包含哪些特别的信息吗?'
-    },
-    {
-        role: 'user',
-        content: '我想应聘前端开发工程师,有3年Vue和React经验'
-    },
-    {
-        role: 'assistant',
-        content: '好的,我已根据您的要求写了一封求职信模板:\n\n尊敬的招聘经理,\n\n您好!我在贵公司官网上看到前端开发工程师的招聘信息...'
-    },
-    {
-        role: 'user',
-        content: '谢谢!能再帮我优化一下简历中的项目描述部分吗?'
-    },
-    {
-        role: 'assistant',
-        content: '当然可以。建议采用STAR法则(Situation-Task-Action-Result)来描述项目经验,这样更能突出您的贡献和价值。'
-    },
-    {
-        role: 'user',
-        content: '什么是STAR法则?能举个例子吗?'
-    },
-    {
-        role: 'assistant',
-        content: 'STAR法则是一种结构化表达方法:\n\n情境(Situation):项目背景\n任务(Task):你的职责\n行动(Action):采取的措施\n结果(Result):取得的成果\n\n例如:开发了基于Vue3的管理系统,优化了页面加载速度30%...'
-    },
-    {
-        role: 'user',
-        content: '明白了,这样写确实更专业!'
-    }
-]);
+// create messages
+if (!tabStorage.messages) {
+    tabStorage.messages = [] as ChatMessage[];
+}
+const messages = tabStorage.messages;
 
 const isLoading = ref(false);
 const streamingContent = ref('');
 const chatContainerRef = ref<HTMLElement>();
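Note: messages now live on the tab's storage object instead of a hard-coded demo `ref`, so the history survives tab switches, and the later hunks change every `messages.value.push` to a plain `messages.push`. A sketch of the storage shape this code assumes; `ChatStorage` is defined in `./chat`, which is not part of this diff, so the fields below are inferred from usage, not confirmed:

    // inferred from usage in this commit, not from ./chat itself
    interface ChatMessage {
        role: 'system' | 'user' | 'assistant';
        content: string;
    }
    interface ChatStorage {
        messages: ChatMessage[];     // created lazily by the guard above
        settings: {
            temperature: number;     // read in handleSend below
            systemPrompt?: string;   // prepended as a system message
        };
    }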
@@ -146,16 +110,34 @@ const handleSend = () => {
     if (!userInput.value.trim() || isLoading.value) return;
 
     const userMessage = userInput.value.trim();
-    messages.value.push({ role: 'user', content: userMessage });
+    messages.push({ role: 'user', content: userMessage });
+
+    // the backend accepts the fields baseURL, apiKey, model, messages, temperature
+    const baseURL = llms[llmManager.currentModelIndex].baseUrl;
+    const apiKey = llms[llmManager.currentModelIndex].userToken;
+    const model = llms[llmManager.currentModelIndex].userModel;
+    const temperature = tabStorage.settings.temperature;
+
+    const userMessages = [];
+    if (tabStorage.settings.systemPrompt) {
+        userMessages.push({
+            role: 'system',
+            content: tabStorage.settings.systemPrompt
+        });
+    }
+
+    userMessages.concat(messages);
+    userMessages.push({
+        role: 'assistant',
+        content: streamingContent.value
+    });
 
     const chatData = {
-        messages: [
-            ...messages.value.filter(msg => msg.role === 'user').map(msg => ({
-                role: msg.role,
-                content: msg.content
-            })),
-            { role: 'assistant', content: streamingContent.value }
-        ]
+        baseURL,
+        apiKey,
+        model,
+        temperature,
+        messages: userMessages,
     };
 
     isLoading.value = true;
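Note: in the added code above, `userMessages.concat(messages)` is a no-op, because `Array.prototype.concat` returns a new array instead of mutating its receiver, so the chat history never reaches `chatData`. A minimal sketch of the intended call, using the same names as the hunk:

    // concat() builds and returns a new array; userMessages is untouched,
    // so this line silently drops the history:
    userMessages.concat(messages);

    // push() with a spread mutates userMessages in place, as intended:
    userMessages.push(...messages);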
@@ -171,7 +153,7 @@ const handleSend = () => {
 
 const handleError = (errorMsg: string) => {
     ElMessage.error(errorMsg);
-    messages.value.push({
+    messages.push({
         role: 'assistant',
         content: `错误: ${errorMsg}`
     });
@@ -200,7 +182,7 @@ onMounted(() => {
         return;
     }
     if (streamingContent.value) {
-        messages.value.push({
+        messages.push({
             role: 'assistant',
             content: streamingContent.value
         });
@@ -323,6 +305,12 @@ onUnmounted(() => {
     border-radius: .5em;
 }
 
+:deep(.chat-settings) {
+    position: absolute;
+    left: 0;
+    bottom: 8px;
+    z-index: 1;
+}
 .typing-cursor {
     animation: blink 1s infinite;
 }
@@ -46,7 +46,9 @@
 
         <!-- model selection dialog -->
         <el-dialog v-model="showModelDialog" title="选择模型" width="400px">
-            <el-radio-group v-model="selectedModelIndex">
+            <el-radio-group v-model="selectedModelIndex"
+                @change="onRadioGroupChange"
+            >
                 <el-radio v-for="(model, index) in availableModels" :key="index" :label="index">
                     {{ model }}
                 </el-radio>
@@ -157,6 +159,11 @@ const confirmModelChange = () => {
     llmManager.currentModelIndex = selectedModelIndex.value;
     showModelDialog.value = false;
 };
+
+const onRadioGroupChange = () => {
+    const currentModel = llms[llmManager.currentModelIndex].models[selectedModelIndex.value];
+    llms[llmManager.currentModelIndex].userModel = currentModel;
+};
 </script>
 
 <style scoped>
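Note: `onRadioGroupChange` writes `userModel` on every radio change, while `confirmModelChange` still commits `currentModelIndex` separately, so closing the dialog without confirming leaves the model already switched. If cancel should revert, one hedged sketch; the snapshot/restore handlers below are ours, not in the commit:

    // snapshot on open, restore on cancel (illustrative only)
    let previousModel: string;
    const openModelDialog = () => {
        previousModel = llms[llmManager.currentModelIndex].userModel;
        showModelDialog.value = true;
    };
    const cancelModelChange = () => {
        llms[llmManager.currentModelIndex].userModel = previousModel;
        showModelDialog.value = false;
    };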
@@ -164,6 +171,12 @@ const confirmModelChange = () => {
     display: flex;
     gap: 2px;
     padding: 8px 0;
+    background-color: var(--sidebar);
+    width: fit-content;
+    border-radius: 99%;
+    bottom: 0px;
+    z-index: 10;
+    position: absolute;
 }
 
 .setting-button {
@@ -45,7 +45,6 @@ let tabCounter = 1;
 watch(
     () => tabs,
     (newValue, oldValue) => {
-        console.log('state change');
         safeSavePanels();
     },
     { deep: true }
@@ -58,7 +58,7 @@ export function safeSavePanels() {
     clearTimeout(debounceHandler);
     debounceHandler = setTimeout(() => {
         savePanels();
-    }, 200);
+    }, 1000);
 }
 
 export function savePanels(saveHandler?: () => void) {
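Note: this hunk only lengthens the save delay from 200 ms to 1000 ms. For context, a minimal sketch of the debounce pattern the function implements, assuming `debounceHandler` is a module-level timer handle; only the lines shown in the hunk are confirmed:

    let debounceHandler: ReturnType<typeof setTimeout> | undefined;

    export function safeSavePanels() {
        // every call restarts the timer, so savePanels() runs only after
        // the panel state has been quiet for a full second
        clearTimeout(debounceHandler);
        debounceHandler = setTimeout(() => {
            savePanels();
        }, 1000);
    }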
@@ -12,7 +12,7 @@ export async function chatCompletionHandler(client: MCPClient | undefined, data:
     }
 
 
-    const { baseURL, apiKey, model, messages } = data;
+    const { baseURL, apiKey, model, messages, temperature } = data;
 
     try {
         const client = new OpenAI({
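Note: with `temperature` added, the handler now destructures five fields from `data`. A hedged sketch of the payload shape, mirroring the `chatData` object built in the renderer; the interface name is illustrative, not from the repo:

    // illustrative only; mirrors the renderer's chatData object
    interface ChatCompletionPayload {
        baseURL: string;       // llms[...].baseUrl
        apiKey: string;        // llms[...].userToken
        model: string;         // llms[...].userModel
        temperature: number;   // tabStorage.settings.temperature
        messages: { role: string; content: string }[];
    }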
@@ -23,6 +23,8 @@ export async function chatCompletionHandler(client: MCPClient | undefined, data:
     const stream = await client.chat.completions.create({
         model,
         messages,
+        temperature,
+        web_search_options: {},
         stream: true
     });
 
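Note: with `stream: true`, the OpenAI Node SDK returns an async iterable of chunks rather than a single completion. A minimal sketch of consuming it; `forwardDelta` stands in for however this handler actually relays fragments to the renderer, which this hunk does not show:

    for await (const chunk of stream) {
        // each chunk carries at most a small fragment of the reply
        const delta = chunk.choices[0]?.delta?.content ?? '';
        if (delta) {
            forwardDelta(delta);   // hypothetical relay to the renderer
        }
    }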
@@ -12,9 +12,19 @@
             "enableTools": true,
             "enableWebSearch": false,
             "temperature": 0.7,
-            "contextLength": 41,
-            "systemPrompt": "ad"
-        }
+            "contextLength": 10,
+            "systemPrompt": "你的名字是 openmcp"
+        },
+        "messages": [
+            {
+                "role": "user",
+                "content": "请问你的名字是什么"
+            },
+            {
+                "role": "assistant",
+                "content": "我的名字是 **OpenMCP**(Open Multi-modal Cognitive Platform)。 \n\n我是由 **深度求索(DeepSeek)** 开发的一款 **多模态认知大模型**,能够处理文本、图像、文档等多种信息,并具备强大的理解和推理能力。 \n\n你可以叫我 **OpenMCP**,或者简称 **MCP**!😊 有什么我可以帮你的吗?"
+            }
+        ]
     }
 },
 {