Support Google Gemini
parent 1b85207b7f
commit f8447cadb6
@@ -1,8 +1,8 @@
 @font-face {
   font-family: "iconfont"; /* Project id 4870215 */
-  src: url('iconfont.woff2?t=1747820198035') format('woff2'),
-       url('iconfont.woff?t=1747820198035') format('woff'),
-       url('iconfont.ttf?t=1747820198035') format('truetype');
+  src: url('iconfont.woff2?t=1748859145515') format('woff2'),
+       url('iconfont.woff?t=1748859145515') format('woff'),
+       url('iconfont.ttf?t=1748859145515') format('truetype');
 }

 .iconfont {
@@ -13,6 +13,10 @@
   -moz-osx-font-smoothing: grayscale;
 }

+.icon-proxy:before {
+  content: "\e723";
+}
+
 .icon-parallel:before {
   content: "\e61d";
 }
Binary file not shown.
@@ -9,9 +9,10 @@ import { ElMessage } from "element-plus";
 import { handleToolCalls, type ToolCallResult } from "./handle-tool-calls";
 import { getPlatform } from "@/api/platform";
 import { getSystemPrompt } from "../chat-box/options/system-prompt";
+import { mcpSetting } from "@/hook/mcp";

 export type ChatCompletionChunk = OpenAI.Chat.Completions.ChatCompletionChunk;
-export type ChatCompletionCreateParamsBase = OpenAI.Chat.Completions.ChatCompletionCreateParams & { id?: string };
+export type ChatCompletionCreateParamsBase = OpenAI.Chat.Completions.ChatCompletionCreateParams & { id?: string, proxyServer?: string };

 export interface TaskLoopOptions {
     maxEpochs?: number;
     maxJsonParseRetry?: number;
@@ -79,13 +80,18 @@ export class TaskLoop {
         const toolCall = chunk.choices[0]?.delta?.tool_calls?.[0];

         if (toolCall) {
-            const currentCall = this.streamingToolCalls.value[toolCall.index];
+            if (toolCall.index === undefined || toolCall.index === null) {
+                console.warn('tool_call.index is undefined or null');
+            }
+
+            const index = toolCall.index || 0;
+            const currentCall = this.streamingToolCalls.value[index];

             if (currentCall === undefined) {
                 // a new tool call begins
-                this.streamingToolCalls.value[toolCall.index] = {
+                this.streamingToolCalls.value[index] = {
                     id: toolCall.id,
-                    index: toolCall.index,
+                    index,
                     type: 'function',
                     function: {
                         name: toolCall.function?.name || '',
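Note on the index fallback above: Gemini's OpenAI-compatible streaming endpoint can emit tool-call deltas without an `index`, even though the openai-node types treat it as always present. A minimal sketch of the accumulation scheme, assuming the openai-node chunk types (the helper name is hypothetical, not part of this commit):

import type OpenAI from 'openai';

type ToolCallDelta = OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta.ToolCall;

function accumulateToolCall(
    calls: Record<number, ToolCallDelta>,
    delta: ToolCallDelta,
): void {
    const index = delta.index ?? 0;               // tolerate a missing index
    const current = calls[index];
    if (current === undefined) {
        calls[index] = { ...delta, index };       // first delta opens the call
    } else if (delta.function?.arguments) {
        // later deltas append JSON argument fragments to the open call
        current.function = current.function ?? {};
        current.function.arguments =
            (current.function.arguments ?? '') + delta.function.arguments;
    }
}

With `?? 0` a single tool call still accumulates correctly when the provider never sends an index; multiple parallel calls would collide on slot 0, which is why the warning above is logged.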
@@ -124,6 +130,8 @@ export class TaskLoop {
             // data.code is guaranteed to be 200; otherwise this route is never taken
             const { chunk } = data.msg as { chunk: ChatCompletionChunk };

+            console.log(chunk);
+
             // handle incremental content and tool_calls
             this.handleChunkDeltaContent(chunk);
             this.handleChunkDeltaToolCalls(chunk);
@@ -181,6 +189,7 @@ export class TaskLoop {
         const temperature = tabStorage.settings.temperature;
         const tools = getToolSchema(tabStorage.settings.enableTools);
         const parallelToolCalls = tabStorage.settings.parallelToolCalls;
+        const proxyServer = mcpSetting.proxyServer || '';

         const userMessages = [];

@@ -211,6 +220,7 @@ export class TaskLoop {
             tools,
             parallelToolCalls,
             messages: userMessages,
+            proxyServer
         } as ChatCompletionCreateParamsBase;

         return chatData;
@@ -396,7 +406,7 @@ export class TaskLoop {
         tabStorage.messages.push({
             role: 'tool',
             index: toolCall.index || 0,
-            tool_call_id: toolCall.id || toolCall.function.name,
+            tool_call_id: toolCall.id || '',
             content: toolCallResult.content,
             extraInfo: {
                 created: Date.now(),
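Context for the `tool_call_id` fallback: a `role: 'tool'` message must echo the id of the call it answers, and Gemini's compatibility layer may omit `toolCall.id`; falling back to the empty string keeps the message well-formed instead of leaking a function name into the id field. A sketch of the resulting message shape (field values illustrative; `toolCallResult` comes from the surrounding scope):

// Illustrative shape of the tool-result message pushed above.
const toolMessage = {
    role: 'tool' as const,
    index: 0,                         // slot of the originating tool call
    tool_call_id: '',                 // '' when the provider sent no id
    content: toolCallResult.content,  // result payload from the tool
    extraInfo: { created: Date.now() },
};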
@@ -33,4 +33,5 @@ export function normaliseJavascriptType(type: string) {

 export const mcpSetting = reactive({
     timeout: 60,
+    proxyServer: '',
 });
@@ -20,6 +20,7 @@ export async function loadSetting() {
     llmManager.currentModelIndex = persistConfig.MODEL_INDEX || 0;
     I18n.global.locale.value = persistConfig.LANG || 'zh';
     mcpSetting.timeout = persistConfig.MCP_TIMEOUT_SEC || 60;
+    mcpSetting.proxyServer = persistConfig.PROXY_SERVER || '';

     persistConfig.LLM_INFO.forEach((element: any) => {
         llms.push(element);
@@ -51,7 +52,8 @@ export function saveSetting(saveHandler?: () => void) {
         MODEL_INDEX: llmManager.currentModelIndex,
         LLM_INFO: JSON.parse(JSON.stringify(llms)),
         LANG: I18n.global.locale.value,
-        MCP_TIMEOUT_SEC: mcpSetting.timeout
+        MCP_TIMEOUT_SEC: mcpSetting.timeout,
+        PROXY_SERVER: mcpSetting.proxyServer
     };

     bridge.addCommandListener('setting/save', data => {
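For reference, the persisted settings payload now carries the proxy alongside the existing keys; a sketch of its shape (interface name hypothetical, key names taken from this diff):

interface PersistedSetting {
    MODEL_INDEX: number;      // llmManager.currentModelIndex
    LLM_INFO: any[];          // deep-copied llms
    LANG: string;             // I18n locale
    MCP_TIMEOUT_SEC: number;  // mcpSetting.timeout
    PROXY_SERVER: string;     // new: mcpSetting.proxyServer, '' when unset
}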
@@ -156,5 +156,6 @@
     "error": "خطأ",
     "feedback": "تعليقات",
     "waiting-mcp-server": "في انتظار استجابة خادم MCP",
-    "parallel-tool-calls": "السماح للنموذج باستدعاء أدوات متعددة في رد واحد"
+    "parallel-tool-calls": "السماح للنموذج باستدعاء أدوات متعددة في رد واحد",
+    "proxy-server": "خادم وكيل"
 }
@@ -156,5 +156,6 @@
     "error": "Fehler",
     "feedback": "Feedback",
     "waiting-mcp-server": "Warten auf Antwort vom MCP-Server",
-    "parallel-tool-calls": "Erlauben Sie dem Modell, mehrere Tools in einer einzigen Antwort aufzurufen"
+    "parallel-tool-calls": "Erlauben Sie dem Modell, mehrere Tools in einer einzigen Antwort aufzurufen",
+    "proxy-server": "Proxy-Server"
 }
@@ -156,5 +156,6 @@
     "error": "Error",
     "feedback": "Feedback",
     "waiting-mcp-server": "Waiting for MCP server response",
-    "parallel-tool-calls": "Allow the model to call multiple tools in a single reply"
+    "parallel-tool-calls": "Allow the model to call multiple tools in a single reply",
+    "proxy-server": "Proxy server"
 }
@@ -156,5 +156,6 @@
     "error": "Erreur",
     "feedback": "Retour",
     "waiting-mcp-server": "En attente de la réponse du serveur MCP",
-    "parallel-tool-calls": "Permettre au modèle d'appeler plusieurs outils en une seule réponse"
+    "parallel-tool-calls": "Permettre au modèle d'appeler plusieurs outils en une seule réponse",
+    "proxy-server": "Serveur proxy"
 }
@@ -156,5 +156,6 @@
     "error": "エラー",
     "feedback": "フィードバック",
     "waiting-mcp-server": "MCPサーバーの応答を待機中",
-    "parallel-tool-calls": "モデルが単一の返信で複数のツールを呼び出すことを許可する"
+    "parallel-tool-calls": "モデルが単一の返信で複数のツールを呼び出すことを許可する",
+    "proxy-server": "プロキシサーバー"
 }
@@ -156,5 +156,6 @@
     "error": "오류",
     "feedback": "피드백",
     "waiting-mcp-server": "MCP 서버 응답 대기 중",
-    "parallel-tool-calls": "모델이 단일 응답에서 여러 도구를 호출할 수 있도록 허용"
+    "parallel-tool-calls": "모델이 단일 응답에서 여러 도구를 호출할 수 있도록 허용",
+    "proxy-server": "프록시 서버"
 }
@@ -156,5 +156,6 @@
     "error": "Ошибка",
     "feedback": "Обратная связь",
     "waiting-mcp-server": "Ожидание ответа от сервера MCP",
-    "parallel-tool-calls": "Разрешить модели вызывать несколько инструментов в одном ответе"
+    "parallel-tool-calls": "Разрешить модели вызывать несколько инструментов в одном ответе",
+    "proxy-server": "Прокси-сервер"
 }
@@ -156,5 +156,6 @@
     "error": "错误",
     "feedback": "反馈",
     "waiting-mcp-server": "等待 MCP 服务器响应",
-    "parallel-tool-calls": "允许模型在单轮回复中调用多个工具"
+    "parallel-tool-calls": "允许模型在单轮回复中调用多个工具",
+    "proxy-server": "代理服务器"
 }
@@ -156,5 +156,6 @@
     "error": "錯誤",
     "feedback": "反饋",
     "waiting-mcp-server": "等待MCP伺服器響應",
-    "parallel-tool-calls": "允許模型在單輪回覆中調用多個工具"
+    "parallel-tool-calls": "允許模型在單輪回覆中調用多個工具",
+    "proxy-server": "代理伺服器"
 }
@@ -27,6 +27,20 @@
                 @change="safeSaveSetting" />
         </div>
     </div>

+    <div class="setting-option">
+        <span>
+            <span class="iconfont icon-proxy"></span>
+            <span class="option-title">{{ t('proxy-server') }}</span>
+        </span>
+        <div style="width: 200px;">
+            <el-input
+                v-model="mcpSetting.proxyServer"
+                :placeholder="'http://localhost:7890'"
+                @input="safeSaveSetting"
+            />
+        </div>
+    </div>
 </div>
</template>
@@ -1,4 +1,5 @@
-import axios, { AxiosResponse } from "axios";
+import axios, { AxiosProxyConfig } from "axios";
+import { HttpsProxyAgent } from 'https-proxy-agent';

 interface FetchOptions {
     method?: string;
@@ -16,7 +17,7 @@ interface FetchResponse {
     redirected: boolean;
     type: string;
     body: any;

     json(): Promise<any>;
     text(): Promise<string>;
     arrayBuffer(): Promise<ArrayBuffer>;
@@ -24,7 +25,7 @@ interface FetchResponse {
 }

 interface ReadableStreamDefaultReader {
-    read(): Promise<{done: boolean, value?: any}>;
+    read(): Promise<{ done: boolean, value?: any }>;
     cancel(): Promise<void>;
     releaseLock(): void;
     get closed(): boolean;
@@ -185,8 +186,18 @@ function adaptResponse(axiosResponse: FetchOptions): FetchResponse {
 /**
  * @description main entry - a fetch implementation backed by axios
  */
-export async function axiosFetch(url: any, options: any): Promise<any> {
-    const axiosConfig = adaptRequestOptions(url, options);
+export async function axiosFetch(input: any, init: any, requestOption: { proxyServer?: string } = {}): Promise<any> {
+    const axiosConfig = adaptRequestOptions(input, init);
+
+    const {
+        proxyServer = ''
+    } = requestOption;
+
+    if (proxyServer) {
+        const proxyAgent = new HttpsProxyAgent(proxyServer);
+        axiosConfig.httpsAgent = proxyAgent;
+        axiosConfig.httpAgent = proxyAgent;
+    }

     try {
         const response = await axios(axiosConfig) as FetchOptions;
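For context, `https-proxy-agent` tunnels a request through an HTTP(S) proxy by acting as the connection agent, and axios picks it up via `httpAgent`/`httpsAgent`. A standalone sketch under those assumptions (URL and proxy address are illustrative):

import axios from 'axios';
import { HttpsProxyAgent } from 'https-proxy-agent';

async function fetchThroughProxy(url: string, proxyServer: string) {
    const agent = new HttpsProxyAgent(proxyServer);
    return axios.get(url, {
        httpAgent: agent,   // plain-HTTP requests go through the proxy
        httpsAgent: agent,  // HTTPS requests are CONNECT-tunneled via the proxy
        proxy: false,       // disable axios' built-in proxy handling so the agent wins
    });
}

// e.g. fetchThroughProxy('https://generativelanguage.googleapis.com', 'http://localhost:7890');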
@@ -20,7 +20,8 @@ export async function streamingChatCompletion(
         messages,
         temperature,
         tools = [],
-        parallelToolCalls = true
+        parallelToolCalls = true,
+        proxyServer = ''
     } = data;

     const client = new OpenAI({
@@ -28,7 +29,7 @@ export async function streamingChatCompletion(
         apiKey,
         fetch: async (input: string | URL | Request, init?: RequestInit) => {

-            console.log('openai fetch begin');
+            console.log('openai fetch begin, proxyServer:', proxyServer);

             if (model.startsWith('gemini')) {
                 // damn Google
@@ -38,11 +39,8 @@ export async function streamingChatCompletion(
                         'Authorization': `Bearer ${apiKey}`
                     }
                 }

-                console.log('input:', input);
-                console.log('init:', init);
-
-                return await axiosFetch(input, init);
+                return await axiosFetch(input, init, { proxyServer });
            } else {
                return await fetch(input, init);
            }
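The hook point here is the OpenAI SDK's constructor-level `fetch` override: only Gemini traffic is diverted through the proxy-aware `axiosFetch`, while every other model keeps the platform's native `fetch`. A condensed sketch of the pattern (identifiers such as `baseURL`, `apiKey`, `model`, and `proxyServer` come from the surrounding scope):

import OpenAI from 'openai';

const client = new OpenAI({
    baseURL,
    apiKey,
    // Route Gemini requests through the axios-based, proxy-aware fetch;
    // everything else uses the native fetch unchanged.
    fetch: async (input: string | URL | Request, init?: RequestInit) =>
        model.startsWith('gemini')
            ? await axiosFetch(input, init, { proxyServer })
            : await fetch(input, init),
});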
@@ -135,10 +135,7 @@ export class McpClient {
     // invoke a tool
     public async callTool(options: { name: string; arguments: Record<string, any>, callToolOption?: any }) {
         const { callToolOption, ...methodArgs } = options;
-        console.log('methodArgs', methodArgs);
-        console.log('callToolOption', callToolOption);
         const res = await this.client.callTool(methodArgs, undefined, callToolOption);
-        console.log('callTool res', res);

         return res;
     }
@@ -15,9 +15,6 @@ export class ConnectController {
     async lookupEnvVar(data: RequestData, webview: PostMessageble) {
         const { keys } = data;
         const values = keys.map((key: string) => {
             // TODO: test on Windows
-            console.log(key);
-            console.log(process.env);
-
             if (process.platform === 'win32') {
                 switch (key) {
@@ -52,7 +52,11 @@ export function loadSetting(): IConfig {

     try {
         const configData = fs.readFileSync(configPath, 'utf-8');
-        return JSON.parse(configData) as IConfig;
+        const config = JSON.parse(configData) as IConfig;
+        if (!config.LLM_INFO || (Array.isArray(config.LLM_INFO) && config.LLM_INFO.length === 0)) {
+            config.LLM_INFO = llms;
+        }
+        return config;
     } catch (error) {
         console.error('Error loading config file, creating new one:', error);
         return createConfig();