support google gemini

commit f8447cadb6
parent 1b85207b7f
@@ -1,8 +1,8 @@
 @font-face {
   font-family: "iconfont"; /* Project id 4870215 */
-  src: url('iconfont.woff2?t=1747820198035') format('woff2'),
-       url('iconfont.woff?t=1747820198035') format('woff'),
-       url('iconfont.ttf?t=1747820198035') format('truetype');
+  src: url('iconfont.woff2?t=1748859145515') format('woff2'),
+       url('iconfont.woff?t=1748859145515') format('woff'),
+       url('iconfont.ttf?t=1748859145515') format('truetype');
 }
 
 .iconfont {
@@ -13,6 +13,10 @@
   -moz-osx-font-smoothing: grayscale;
 }
 
+.icon-proxy:before {
+  content: "\e723";
+}
+
 .icon-parallel:before {
   content: "\e61d";
 }
Binary file not shown.
@@ -9,9 +9,10 @@ import { ElMessage } from "element-plus";
 import { handleToolCalls, type ToolCallResult } from "./handle-tool-calls";
 import { getPlatform } from "@/api/platform";
 import { getSystemPrompt } from "../chat-box/options/system-prompt";
+import { mcpSetting } from "@/hook/mcp";
 
 export type ChatCompletionChunk = OpenAI.Chat.Completions.ChatCompletionChunk;
-export type ChatCompletionCreateParamsBase = OpenAI.Chat.Completions.ChatCompletionCreateParams & { id?: string };
+export type ChatCompletionCreateParamsBase = OpenAI.Chat.Completions.ChatCompletionCreateParams & { id?: string, proxyServer?: string };
 export interface TaskLoopOptions {
     maxEpochs?: number;
     maxJsonParseRetry?: number;
@@ -79,13 +80,18 @@ export class TaskLoop {
         const toolCall = chunk.choices[0]?.delta?.tool_calls?.[0];
 
         if (toolCall) {
-            const currentCall = this.streamingToolCalls.value[toolCall.index];
+            if (toolCall.index === undefined || toolCall.index === null) {
+                console.warn('tool_call.index is undefined or null');
+            }
+
+            const index = toolCall.index || 0;
+            const currentCall = this.streamingToolCalls.value[index];
 
             if (currentCall === undefined) {
                 // a new tool call begins
-                this.streamingToolCalls.value[toolCall.index] = {
+                this.streamingToolCalls.value[index] = {
                     id: toolCall.id,
-                    index: toolCall.index,
+                    index,
                     type: 'function',
                     function: {
                         name: toolCall.function?.name || '',
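Note: with OpenAI-style streaming, a tool call arrives as a series of deltas that are merged by `index`; Gemini's OpenAI-compatible endpoint can omit `index` on those deltas, which is what the fallback to 0 above guards against. A minimal sketch of the merge pattern, using an illustrative type rather than the project's own (`ToolCallDelta` and `mergeToolCallDelta` are assumptions, not code from this repo):

    // Minimal shape of a streamed tool-call fragment (subset of the OpenAI type).
    interface ToolCallDelta {
        index?: number;
        id?: string;
        function?: { name?: string; arguments?: string };
    }

    function mergeToolCallDelta(calls: ToolCallDelta[], delta: ToolCallDelta): void {
        // Fall back to 0 when the provider omits `index`, mirroring the patch above.
        const index = delta.index ?? 0;
        const current = calls[index];
        if (current === undefined) {
            // First fragment of this call: record the id and function name.
            calls[index] = {
                index,
                id: delta.id,
                function: {
                    name: delta.function?.name ?? '',
                    arguments: delta.function?.arguments ?? '',
                },
            };
        } else if (delta.function?.arguments) {
            // Later fragments only carry argument text; append it.
            current.function!.arguments = (current.function!.arguments ?? '') + delta.function.arguments;
        }
    }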
@@ -124,6 +130,8 @@ export class TaskLoop {
             // data.code is guaranteed to be 200; otherwise this route is never taken
             const { chunk } = data.msg as { chunk: ChatCompletionChunk };
 
+            console.log(chunk);
+
             // handle the incremental content and tool_calls
             this.handleChunkDeltaContent(chunk);
             this.handleChunkDeltaToolCalls(chunk);
@@ -181,6 +189,7 @@ export class TaskLoop {
         const temperature = tabStorage.settings.temperature;
         const tools = getToolSchema(tabStorage.settings.enableTools);
         const parallelToolCalls = tabStorage.settings.parallelToolCalls;
+        const proxyServer = mcpSetting.proxyServer || '';
 
         const userMessages = [];
 
@@ -211,6 +220,7 @@ export class TaskLoop {
             tools,
             parallelToolCalls,
             messages: userMessages,
+            proxyServer
         } as ChatCompletionCreateParamsBase;
 
         return chatData;
@@ -396,7 +406,7 @@ export class TaskLoop {
                 tabStorage.messages.push({
                     role: 'tool',
                     index: toolCall.index || 0,
-                    tool_call_id: toolCall.id || toolCall.function.name,
+                    tool_call_id: toolCall.id || '',
                     content: toolCallResult.content,
                     extraInfo: {
                         created: Date.now(),
@@ -33,4 +33,5 @@ export function normaliseJavascriptType(type: string) {
 
 export const mcpSetting = reactive({
     timeout: 60,
+    proxyServer: '',
 });
@@ -20,6 +20,7 @@ export async function loadSetting() {
     llmManager.currentModelIndex = persistConfig.MODEL_INDEX || 0;
     I18n.global.locale.value = persistConfig.LANG || 'zh';
     mcpSetting.timeout = persistConfig.MCP_TIMEOUT_SEC || 60;
+    mcpSetting.proxyServer = persistConfig.PROXY_SERVER || '';
 
     persistConfig.LLM_INFO.forEach((element: any) => {
         llms.push(element);
@@ -51,7 +52,8 @@ export function saveSetting(saveHandler?: () => void) {
         MODEL_INDEX: llmManager.currentModelIndex,
         LLM_INFO: JSON.parse(JSON.stringify(llms)),
         LANG: I18n.global.locale.value,
-        MCP_TIMEOUT_SEC: mcpSetting.timeout
+        MCP_TIMEOUT_SEC: mcpSetting.timeout,
+        PROXY_SERVER: mcpSetting.proxyServer
     };
 
     bridge.addCommandListener('setting/save', data => {
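For reference, after this change the persisted settings object round-trips the proxy value. Assuming only the fields visible in the hunks above (values here are examples, not defaults from the repo), the saved shape is roughly:

    // Illustrative only: field names come from the hunks above, values are examples.
    const persistConfig = {
        MODEL_INDEX: 0,
        LLM_INFO: [],            // serialized model entries
        LANG: 'zh',
        MCP_TIMEOUT_SEC: 60,
        PROXY_SERVER: 'http://localhost:7890',
    };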
@@ -156,5 +156,6 @@
     "error": "خطأ",
     "feedback": "تعليقات",
     "waiting-mcp-server": "في انتظار استجابة خادم MCP",
-    "parallel-tool-calls": "السماح للنموذج باستدعاء أدوات متعددة في رد واحد"
+    "parallel-tool-calls": "السماح للنموذج باستدعاء أدوات متعددة في رد واحد",
+    "proxy-server": "خادم وكيل"
 }
@@ -156,5 +156,6 @@
     "error": "Fehler",
     "feedback": "Feedback",
     "waiting-mcp-server": "Warten auf Antwort vom MCP-Server",
-    "parallel-tool-calls": "Erlauben Sie dem Modell, mehrere Tools in einer einzigen Antwort aufzurufen"
+    "parallel-tool-calls": "Erlauben Sie dem Modell, mehrere Tools in einer einzigen Antwort aufzurufen",
+    "proxy-server": "Proxy-Server"
 }
@@ -156,5 +156,6 @@
     "error": "Error",
     "feedback": "Feedback",
     "waiting-mcp-server": "Waiting for MCP server response",
-    "parallel-tool-calls": "Allow the model to call multiple tools in a single reply"
+    "parallel-tool-calls": "Allow the model to call multiple tools in a single reply",
+    "proxy-server": "Proxy server"
 }
@@ -156,5 +156,6 @@
     "error": "Erreur",
     "feedback": "Retour",
     "waiting-mcp-server": "En attente de la réponse du serveur MCP",
-    "parallel-tool-calls": "Permettre au modèle d'appeler plusieurs outils en une seule réponse"
+    "parallel-tool-calls": "Permettre au modèle d'appeler plusieurs outils en une seule réponse",
+    "proxy-server": "Serveur proxy"
 }
@@ -156,5 +156,6 @@
     "error": "エラー",
     "feedback": "フィードバック",
     "waiting-mcp-server": "MCPサーバーの応答を待機中",
-    "parallel-tool-calls": "モデルが単一の返信で複数のツールを呼び出すことを許可する"
+    "parallel-tool-calls": "モデルが単一の返信で複数のツールを呼び出すことを許可する",
+    "proxy-server": "プロキシサーバー"
 }
@@ -156,5 +156,6 @@
     "error": "오류",
     "feedback": "피드백",
     "waiting-mcp-server": "MCP 서버 응답 대기 중",
-    "parallel-tool-calls": "모델이 단일 응답에서 여러 도구를 호출할 수 있도록 허용"
+    "parallel-tool-calls": "모델이 단일 응답에서 여러 도구를 호출할 수 있도록 허용",
+    "proxy-server": "프록시 서버"
 }
@@ -156,5 +156,6 @@
     "error": "Ошибка",
     "feedback": "Обратная связь",
     "waiting-mcp-server": "Ожидание ответа от сервера MCP",
-    "parallel-tool-calls": "Разрешить модели вызывать несколько инструментов в одном ответе"
+    "parallel-tool-calls": "Разрешить модели вызывать несколько инструментов в одном ответе",
+    "proxy-server": "Прокси-сервер"
 }
@@ -156,5 +156,6 @@
     "error": "错误",
     "feedback": "反馈",
     "waiting-mcp-server": "等待 MCP 服务器响应",
-    "parallel-tool-calls": "允许模型在单轮回复中调用多个工具"
+    "parallel-tool-calls": "允许模型在单轮回复中调用多个工具",
+    "proxy-server": "代理服务器"
 }
@@ -156,5 +156,6 @@
     "error": "錯誤",
     "feedback": "反饋",
     "waiting-mcp-server": "等待MCP伺服器響應",
-    "parallel-tool-calls": "允許模型在單輪回覆中調用多個工具"
+    "parallel-tool-calls": "允許模型在單輪回覆中調用多個工具",
+    "proxy-server": "代理伺服器"
 }
@@ -27,6 +27,20 @@
                 @change="safeSaveSetting" />
         </div>
     </div>
+
+    <div class="setting-option">
+        <span>
+            <span class="iconfont icon-proxy"></span>
+            <span class="option-title">{{ t('proxy-server') }}</span>
+        </span>
+        <div style="width: 200px;">
+            <el-input
+                v-model="mcpSetting.proxyServer"
+                :placeholder="'http://localhost:7890'"
+                @input="safeSaveSetting"
+            />
+        </div>
+    </div>
 </div>
 </template>
 
@@ -1,4 +1,5 @@
-import axios, { AxiosResponse } from "axios";
+import axios, { AxiosProxyConfig } from "axios";
+import { HttpsProxyAgent } from 'https-proxy-agent';
 
 interface FetchOptions {
     method?: string;
@@ -185,8 +186,18 @@ function adaptResponse(axiosResponse: FetchOptions): FetchResponse {
 /**
  * @description main function - implements fetch on top of axios
  */
-export async function axiosFetch(url: any, options: any): Promise<any> {
-    const axiosConfig = adaptRequestOptions(url, options);
+export async function axiosFetch(input: any, init: any, requestOption: { proxyServer?: string } = {}): Promise<any> {
+    const axiosConfig = adaptRequestOptions(input, init);
+
+    const {
+        proxyServer = ''
+    } = requestOption;
+
+    if (proxyServer) {
+        const proxyAgent = new HttpsProxyAgent(proxyServer);
+        axiosConfig.httpsAgent = proxyAgent;
+        axiosConfig.httpAgent = proxyAgent;
+    }
+
     try {
         const response = await axios(axiosConfig) as FetchOptions;
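Background on the agent approach: axios's built-in `proxy` option is known to be unreliable for HTTPS targets in Node, so routing through `https-proxy-agent` (which tunnels via HTTP CONNECT) is the common workaround. A standalone sketch under that assumption; the target and proxy URLs are examples:

    import axios from 'axios';
    import { HttpsProxyAgent } from 'https-proxy-agent';

    async function fetchViaProxy(url: string, proxyServer: string) {
        // One agent handles both schemes; HTTPS requests are tunneled via CONNECT.
        const agent = new HttpsProxyAgent(proxyServer);
        return await axios.get(url, {
            httpAgent: agent,
            httpsAgent: agent,
            proxy: false, // disable axios's own proxy logic so the agent is used
        });
    }

    // e.g. fetchViaProxy('https://generativelanguage.googleapis.com', 'http://localhost:7890')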
@@ -20,7 +20,8 @@ export async function streamingChatCompletion(
         messages,
         temperature,
         tools = [],
-        parallelToolCalls = true
+        parallelToolCalls = true,
+        proxyServer = ''
     } = data;
 
     const client = new OpenAI({
@@ -28,7 +29,7 @@ export async function streamingChatCompletion(
         apiKey,
         fetch: async (input: string | URL | Request, init?: RequestInit) => {
 
-            console.log('openai fetch begin');
+            console.log('openai fetch begin, proxyServer:', proxyServer);
 
             if (model.startsWith('gemini')) {
                 // damn Google
@@ -39,10 +40,7 @@ export async function streamingChatCompletion(
                     }
                 }
 
-                console.log('input:', input);
-                console.log('init:', init);
-
-                return await axiosFetch(input, init);
+                return await axiosFetch(input, init, { proxyServer });
             } else {
                 return await fetch(input, init);
             }
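This works because the OpenAI SDK accepts a custom `fetch` when constructing the client; only Gemini traffic is diverted through the axios-based, proxy-aware adapter, while everything else uses the platform fetch. A condensed sketch of the pattern (`baseURL`, `apiKey`, `model`, and `proxyServer` are assumed to be in scope, as in the surrounding function):

    import OpenAI from 'openai';

    const client = new OpenAI({
        baseURL,
        apiKey,
        // Swap in the proxy-aware adapter only for Gemini's endpoint.
        fetch: async (input: string | URL | Request, init?: RequestInit) => {
            if (model.startsWith('gemini')) {
                return await axiosFetch(input, init, { proxyServer });
            }
            return await fetch(input, init);
        },
    });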
@@ -135,10 +135,7 @@ export class McpClient {
     // invoke a tool
     public async callTool(options: { name: string; arguments: Record<string, any>, callToolOption?: any }) {
         const { callToolOption, ...methodArgs } = options;
-        console.log('methodArgs', methodArgs);
-        console.log('callToolOption', callToolOption);
         const res = await this.client.callTool(methodArgs, undefined, callToolOption);
-        console.log('callTool res', res);
 
         return res;
     }
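For context, `this.client.callTool(params, resultSchema, options)` is the MCP TypeScript SDK call, where the third argument carries per-request options such as a timeout. A hedged usage sketch of the wrapper above (the tool name, arguments, and option shape are illustrative, not from this repo):

    // Invoke a tool through the McpClient wrapper; callToolOption is forwarded
    // as the SDK's per-request options (assumed here to carry a timeout in ms).
    const res = await mcpClient.callTool({
        name: 'list_files',
        arguments: { path: '.' },
        callToolOption: { timeout: 60 * 1000 },
    });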
@@ -15,9 +15,6 @@ export class ConnectController {
     async lookupEnvVar(data: RequestData, webview: PostMessageble) {
         const { keys } = data;
         const values = keys.map((key: string) => {
-            // TODO: test on Windows
-            console.log(key);
-            console.log(process.env);
 
             if (process.platform === 'win32') {
                 switch (key) {
@@ -52,7 +52,11 @@ export function loadSetting(): IConfig {
 
     try {
         const configData = fs.readFileSync(configPath, 'utf-8');
-        return JSON.parse(configData) as IConfig;
+        const config = JSON.parse(configData) as IConfig;
+        if (!config.LLM_INFO || (Array.isArray(config.LLM_INFO) && config.LLM_INFO.length === 0)) {
+            config.LLM_INFO = llms;
+        }
+        return config;
     } catch (error) {
         console.error('Error loading config file, creating new one:', error);
         return createConfig();