merge & fix tsx launch issue

锦恢 2025-06-04 00:17:26 +08:00
parent 167c791452
commit 68db65c61b
6 changed files with 207 additions and 2965 deletions

package-lock.json (generated), 3149 lines changed
File diff suppressed because it is too large

View File

@@ -214,8 +214,7 @@
 },
 "workspaces": [
 "service",
-"renderer",
-"software"
+"renderer"
 ],
 "scripts": {
 "setup": "npm i && npm run prepare:ocr",

View File

@@ -132,6 +132,7 @@ import ConnectInterfaceOpenai from './connect-interface-openai.vue';
 import ConnectTest from './connect-test.vue';
 import { llmSettingRef, makeSimpleTalk, simpleTestResult } from './api';
 import { useMessageBridge } from '@/api/message-bridge';
+import { mcpSetting } from '@/hook/mcp';
 defineComponent({ name: 'api' });
 const { t } = useI18n();
@@ -233,11 +234,13 @@ async function updateModels() {
 const llm = llms[llmManager.currentModelIndex];
 const apiKey = llm.userToken;
 const baseURL = llm.baseUrl;
+const proxyServer = mcpSetting.proxyServer;
 const bridge = useMessageBridge();
 const { code, msg } = await bridge.commandRequest('llm/models', {
 apiKey,
-baseURL
+baseURL,
+proxyServer
 });
 const isGemini = baseURL.includes('googleapis');

View File

@@ -28,7 +28,7 @@ const { t } = useI18n();
 const isGoogle = computed(() => {
 const model = llms[llmManager.currentModelIndex];
-return model.userModel.startsWith('gemini') || model.baseUrl.includes('googleapis');
+return model.userModel?.startsWith('gemini') || model.baseUrl.includes('googleapis');
 });
 console.log(llms[llmManager.currentModelIndex]);

View File

@@ -6,7 +6,7 @@
 "types": "dist/index.d.ts",
 "type": "module",
 "scripts": {
-"serve": "tsx watch src/main.ts",
+"serve": "nodemon --watch src --exec tsx src/main.ts",
 "build": "tsc",
 "build:watch": "tsc --watch",
 "postbuild": "node ./scripts/post-build.mjs",
@@ -28,6 +28,7 @@
 "@types/pako": "^2.0.3",
 "@types/ws": "^8.18.0",
 "esbuild": "^0.25.3",
+"nodemon": "^3.1.10",
 "tsconfig-paths": "^4.2.0",
 "tsx": "^4.19.4",
 "typescript": "^5.8.3",

View File

@@ -5,6 +5,7 @@ import { PostMessageble } from "../hook/adapter.js";
 import { getClient } from "../mcp/connect.service.js";
 import { abortMessageService, streamingChatCompletion } from "./llm.service.js";
 import { OpenAI } from "openai";
+import { axiosFetch } from "src/hook/axios-fetch.js";
 export class LlmController {
 @Controller('llm/chat/completions')
@@ -41,9 +42,14 @@
 const {
 baseURL,
 apiKey,
+proxyServer
 } = data;
-const client = new OpenAI({ apiKey, baseURL });
+const client = new OpenAI({
+apiKey,
+baseURL,
+});
 const models = await client.models.list();
 return {
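The hunk above does not show how proxyServer and the imported axiosFetch helper end up on the OpenAI client, so the following is only a minimal sketch of one common way to route the SDK through a user-configured proxy; HttpsProxyAgent from https-proxy-agent stands in for whatever axiosFetch does in this repository, and createModelClient is a hypothetical helper name.

import { OpenAI } from "openai";
import { HttpsProxyAgent } from "https-proxy-agent";

// Hypothetical helper: build an OpenAI client that optionally routes
// requests through an HTTP(S) proxy such as http://127.0.0.1:7890.
function createModelClient(apiKey: string, baseURL: string, proxyServer?: string) {
    return new OpenAI({
        apiKey,
        baseURL,
        // Only attach a proxy agent when a proxy server is configured.
        ...(proxyServer ? { httpAgent: new HttpsProxyAgent(proxyServer) } : {})
    });
}

// Usage, mirroring the destructured request payload above:
// const client = createModelClient(apiKey, baseURL, proxyServer);
// const models = await client.models.list();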