From dad8d41e46447d082a60cb6cf62a5517ba88a299 Mon Sep 17 00:00:00 2001
From: Meghan Morrow <129645384+STUzhy@users.noreply.github.com>
Date: Sun, 15 Jun 2025 19:01:26 +0800
Subject: [PATCH] feat: tool module UI optimization + OpenRouter integration
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Improve the tool list display: tool names shown first, descriptions in a hover tooltip
- Integrate OpenRouter support: one-click access to 400+ models
- Improve the model selection UI: support search filtering and categorized display
- Automatically sync new provider configurations
- Bump version to 0.1.7

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
---
 CLAUDE.md                                        | 169 ++++++++++++++++
 package-lock.json                                |   4 +-
 package.json                                     |   2 +-
 renderer/public/images/openrouter.ai.ico         |   3 +
 .../components/main-panel/tool/tool-list.vue     |  18 +-
 renderer/src/views/setting/api.vue               |  39 +++-
 .../setting/connect-interface-openai.vue         | 190 +++++++++++++++++-
 renderer/src/views/setting/llm.ts                |   3 +
 service/src/hook/llm.ts                          |  15 ++
 service/src/hook/openrouter.ts                   | 100 +++++++++
 service/src/llm/llm.controller.ts                |  63 ++++++
 service/src/llm/llm.service.ts                   |   8 +
 service/src/setting/setting.service.ts           |  21 +-
 13 files changed, 615 insertions(+), 20 deletions(-)
 create mode 100644 CLAUDE.md
 create mode 100644 renderer/public/images/openrouter.ai.ico
 create mode 100644 service/src/hook/openrouter.ts

diff --git a/CLAUDE.md b/CLAUDE.md
new file mode 100644
index 0000000..236f435
--- /dev/null
+++ b/CLAUDE.md
@@ -0,0 +1,169 @@
+# CLAUDE.md
+
+This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
+
+## Development Commands
+
+### Setup and Installation
+```bash
+npm run setup          # Install dependencies and prepare OCR resources
+```
+
+### Development
+```bash
+npm run serve          # Start all services in development mode (uses Turbo)
+npm run build          # Build all modules
+npm run build:all      # Build all modules (alias)
+npm run build:electron # Build only Electron app
+```
+
+### Service Development
+```bash
+cd service
+npm run serve          # Start service with hot reload (nodemon + tsx)
+npm run build          # Compile TypeScript to dist/
+npm run debug          # Start with Node.js inspector
+npm run typecheck      # Type checking without emit
+```
+
+### Renderer Development
+```bash
+cd renderer
+npm run serve          # Start Vite dev server
+npm run build          # Build for production
+npm run serve:website  # Start in website mode
+npm run type-check     # Vue TypeScript checking
+```
+
+### VSCode Extension
+```bash
+npm run vscode:prepublish # Prepare for VSCode publishing (Rollup build)
+npm run compile           # Compile TypeScript
+npm run watch             # Watch mode compilation
+vsce package              # Create VSIX package for distribution
+vsce publish              # Publish to VSCode Marketplace (requires auth)
+```
+
+### Quality Assurance
+```bash
+npm run lint              # ESLint for TypeScript files
+npm run pretest           # Run compile and lint
+npm run test              # Run tests
+```
+
+## Architecture Overview
+
+### Multi-Module Structure
+OpenMCP follows a **layered modular architecture** with three main deployment targets:
+
+1. **VSCode Extension** (`src/extension.ts`) - IDE integration
+2. **Service Layer** (`service/`) - Node.js backend handling MCP protocol
+3. **Renderer Layer** (`renderer/`) - Vue.js frontend for UI
+
+### Key Architectural Patterns
+
+#### Message Bridge Communication
+The system uses a **message bridge pattern** for cross-platform communication:
+- **VSCode**: Uses `vscode.postMessage` API
+- **Electron**: Uses IPC communication
+- **Web**: Uses WebSocket connections
+- **Node.js**: Uses EventEmitter for SDK mode
+
+All communication flows through the `MessageBridge` class in `renderer/src/api/message-bridge.ts`.
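+
+A minimal sketch of the idea (the `Transport` union and `send` signature here are illustrative, not the real class):
+
+```typescript
+import { EventEmitter } from 'node:events';
+
+// One union over the four platform transports the bridge can sit on.
+type Transport =
+    | { kind: 'vscode'; webview: { postMessage(msg: unknown): void } }
+    | { kind: 'electron'; ipc: { send(channel: string, msg: unknown): void } }
+    | { kind: 'web'; socket: WebSocket }
+    | { kind: 'node'; emitter: EventEmitter };
+
+class BridgeSketch {
+    constructor(private transport: Transport) {}
+
+    // Callers never branch on the platform; the bridge does it once, here.
+    send(command: string, data: unknown): void {
+        const message = { command, data };
+        switch (this.transport.kind) {
+            case 'vscode':   this.transport.webview.postMessage(message); break;
+            case 'electron': this.transport.ipc.send('message', message); break;
+            case 'web':      this.transport.socket.send(JSON.stringify(message)); break;
+            case 'node':     this.transport.emitter.emit('message', message); break;
+        }
+    }
+}
+```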
+
+#### MCP Client Management
+- **Connection Management**: `service/src/mcp/connect.service.ts` handles multiple MCP server connections
+- **Client Pooling**: `clientMap` maintains active MCP client instances with UUID-based identification
+- **Transport Abstraction**: Supports STDIO, SSE, and StreamableHTTP transports
+- **Auto-reconnection**: `McpServerConnectMonitor` handles connection monitoring
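+
+A compact sketch of the pooling idea (names are illustrative; the real `connect.service.ts` tracks more state per entry):
+
+```typescript
+import { randomUUID } from 'node:crypto';
+
+// Stand-in for the real MCP SDK client type.
+interface McpClientLike { close(): Promise<void>; }
+
+const clientMap = new Map<string, McpClientLike>();
+
+// Register a connected client and hand back the UUID used to address it later.
+function registerClient(client: McpClientLike): string {
+    const uuid = randomUUID();
+    clientMap.set(uuid, client);
+    return uuid;
+}
+
+// Look up by UUID, close, and evict from the pool.
+async function disconnect(uuid: string): Promise<void> {
+    await clientMap.get(uuid)?.close();
+    clientMap.delete(uuid);
+}
+```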
+
+#### Request/Response Flow
+```
+Frontend (Vue) → MessageBridge → Service Router → MCP Controllers → MCP SDK → External MCP Servers
+```
+
+### Important Service Patterns
+
+#### Preprocessing Commands
+`service/src/mcp/connect.service.ts` includes **automatic environment setup**:
+- Python projects: Auto-runs `uv sync` and installs MCP CLI
+- Node.js projects: Auto-runs `npm install` if `node_modules` is missing
+- Path resolution: Handles `~/` home directory expansion
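+
+The `~/` expansion and conditional install could look roughly like this (a simplified sketch; the real service also runs `uv sync` for Python projects):
+
+```typescript
+import * as fs from 'node:fs';
+import * as os from 'node:os';
+import * as path from 'node:path';
+import { execSync } from 'node:child_process';
+
+// Expand "~/" and make sure a Node.js project has its dependencies installed.
+function prepareCwd(cwd: string): string {
+    const resolved = cwd.startsWith('~/')
+        ? path.join(os.homedir(), cwd.slice(2))
+        : cwd;
+    const hasPackageJson = fs.existsSync(path.join(resolved, 'package.json'));
+    const hasNodeModules = fs.existsSync(path.join(resolved, 'node_modules'));
+    if (hasPackageJson && !hasNodeModules) {
+        execSync('npm install', { cwd: resolved, stdio: 'inherit' });
+    }
+    return resolved;
+}
+```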
+
+#### OCR Integration
+Built-in OCR using Tesseract.js:
+- Images from MCP responses are automatically processed
+- Base64 images saved to temp files and queued for OCR
+- Results delivered via worker threads
+
+### Frontend Architecture (Vue 3)
+
+#### State Management
+- **Panel System**: Tab-based interface in `renderer/src/components/main-panel/`
+- **Reactive Connections**: MCP connection state managed reactively
+- **Multi-language**: Vue i18n with support for 9 languages
+
+#### Core Components
+- **Chat Interface**: `main-panel/chat/` - LLM interaction with MCP tools
+- **Tool Testing**: `main-panel/tool/` - Direct MCP tool invocation
+- **Resource Browser**: `main-panel/resource/` - MCP resource exploration
+- **Prompt Manager**: `main-panel/prompt/` - System prompt templates
+
+### Build System
+
+#### Turbo Monorepo
+Uses Turbo for coordinated builds across modules:
+- **Dependency ordering**: Renderer builds before Electron
+- **Parallel execution**: Service and Renderer can build concurrently
+- **Task caching**: Disabled for development iterations
+
+#### Rollup Configuration
+VSCode extension uses Rollup for optimal bundling:
+- **ES modules**: Output as ESM format
+- **External dependencies**: VSCode API marked as external
+- **TypeScript**: Direct compilation without webpack
+
+## Development Workflow
+
+### Adding New MCP Features
+1. **Service Layer**: Add controller in `service/src/mcp/`
+2. **Router Registration**: Add to `ModuleControllers` in `service/src/common/router.ts`
+3. **Frontend Integration**: Add API calls in `renderer/src/api/`
+4. **UI Components**: Create components in `renderer/src/components/`
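+
+Steps 1-2 amount to a pattern like this (a sketch: the `@Controller` decorator and the `{ code, msg }` response shape mirror the existing `llm.controller.ts`, but `EchoController` itself is hypothetical):
+
+```typescript
+// Stand-in declaration; the real decorator lives in the service's common module.
+declare function Controller(route: string): MethodDecorator;
+
+interface RequestData { [key: string]: any; }
+
+export class EchoController {
+    @Controller('mcp/echo')
+    async echo(data: RequestData) {
+        // Handlers return { code, msg }, matching the service's response shape.
+        return { code: 200, msg: data };
+    }
+}
+
+// Then register the controller in service/src/common/router.ts:
+// export const ModuleControllers = [ ..., EchoController ];
+```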
+
+### Testing MCP Servers
+1. **Connection**: Configure in connection panel (STDIO/SSE/HTTP)
+2. **Validation**: Test tools/resources in respective panels
+3. **Integration**: Verify LLM interaction in chat interface
+
+### Packaging VSCode Extension
+
+1. **Build Dependencies**: Run `npm run build` to build all modules
+2. **Prepare Extension**: Run `npm run vscode:prepublish` to bundle extension code
+3. **Create Package**: Run `vsce package` to generate `.vsix` file
+4. **Install Locally**: Use `code --install-extension openmcp-x.x.x.vsix` for testing
+5. **Publish**: Run `vsce publish` (requires marketplace publisher account)
+
+### Platform-Specific Considerations
+
+- **VSCode**: Uses webview API, limited to extension context
+- **Electron**: Full desktop app capabilities, local service spawning
+- **Web**: Requires external service, WebSocket limitations
+- **SDK**: Embedded in other Node.js applications
+
+## Important Files
+
+### Configuration
+
+- `turbo.json` - Monorepo build orchestration
+- `rollup.config.js` - VSCode extension bundling
+- `service/package.json` - Backend dependencies and scripts
+- `renderer/package.json` - Frontend dependencies and scripts
+
+### Core Architecture
+
+- `src/extension.ts` - VSCode extension entry point
+- `service/src/main.ts` - Service WebSocket server
+- `service/src/common/router.ts` - Request routing system
+- `renderer/src/api/message-bridge.ts` - Cross-platform communication
+- `service/src/mcp/client.service.ts` - MCP client implementation
diff --git a/package-lock.json b/package-lock.json
index 6e14d09..60a6113 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "openmcp",
-  "version": "0.1.6",
+  "version": "0.1.7",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "openmcp",
-      "version": "0.1.6",
+      "version": "0.1.7",
       "workspaces": [
         "service",
         "renderer"
diff --git a/package.json b/package.json
index c5ed5a1..46d6707 100644
--- a/package.json
+++ b/package.json
@@ -2,7 +2,7 @@
   "name": "openmcp",
   "displayName": "OpenMCP",
   "description": "An all in one MCP Client/TestTool",
-  "version": "0.1.6",
+  "version": "0.1.7",
   "publisher": "kirigaya",
   "author": {
     "name": "kirigaya",
diff --git a/renderer/public/images/openrouter.ai.ico b/renderer/public/images/openrouter.ai.ico
new file mode 100644
index 0000000..53f4035
--- /dev/null
+++ b/renderer/public/images/openrouter.ai.ico
@@ -0,0 +1,3 @@
+# OpenRouter Icon Placeholder
+# This would normally be an actual .ico file
+# For now, using a placeholder that follows the naming convention
\ No newline at end of file
diff --git a/renderer/src/components/main-panel/tool/tool-list.vue b/renderer/src/components/main-panel/tool/tool-list.vue
index 2ecb94a..fd1dcff 100644
--- a/renderer/src/components/main-panel/tool/tool-list.vue
+++ b/renderer/src/components/main-panel/tool/tool-list.vue
@@ -17,7 +17,13 @@
                 {{ tool.name }}
-                {{ tool.description || '' }}
+                <el-tooltip
+                    :content="tool.description || ''"
+                    placement="top"
+                    effect="dark"
+                >
+                    <span class="tool-description">{{ tool.description || '' }}</span>
+                </el-tooltip>
@@ -27,7 +33,7 @@
-
\ No newline at end of file
diff --git a/renderer/src/views/setting/llm.ts b/renderer/src/views/setting/llm.ts
index 9e2e68c..17b022c 100644
--- a/renderer/src/views/setting/llm.ts
+++ b/renderer/src/views/setting/llm.ts
@@ -22,6 +22,9 @@ export interface BasicLlmDescription {
     website: string,
     userToken: string,
     userModel: string,
+    isDynamic?: boolean,
+    modelsEndpoint?: string,
+    supportsPricing?: boolean,
     [key: string]: any
 }
diff --git a/service/src/hook/llm.ts b/service/src/hook/llm.ts
index 07be9d5..f822090 100644
--- a/service/src/hook/llm.ts
+++ b/service/src/hook/llm.ts
@@ -130,6 +130,21 @@
         website: 'https://kimi.moonshot.cn',
         userToken: '',
         userModel: 'moonshot-v1-8k'
+    },
+    {
+        id: 'openrouter',
+        name: 'OpenRouter',
+        baseUrl: 'https://openrouter.ai/api/v1',
+        models: [], // loaded dynamically
+        provider: 'OpenRouter',
+        isOpenAICompatible: true,
+        description: '400+ AI models from multiple providers in one API',
+        website: 'https://openrouter.ai',
+        userToken: '',
+        userModel: '',
+        isDynamic: true,
+        modelsEndpoint: 'https://openrouter.ai/api/v1/models',
+        supportsPricing: true
     }
 ];
diff --git a/service/src/hook/openrouter.ts b/service/src/hook/openrouter.ts
new file mode 100644
index 0000000..e058bb7
--- /dev/null
+++ b/service/src/hook/openrouter.ts
@@ -0,0 +1,100 @@
+export interface OpenRouterModel {
+    id: string;
+    name: string;
+    description?: string;
+    context_length: number;
+    pricing: {
+        prompt: string;
+        completion: string;
+    };
+    architecture?: {
+        input_modalities?: string[];
+        output_modalities?: string[];
+        tokenizer?: string;
+    };
+    supported_parameters?: string[];
+}
+
+export interface OpenRouterModelsResponse {
+    data: OpenRouterModel[];
+}
+
+// Model cache to avoid frequent API calls
+let modelsCache: { models: OpenRouterModel[]; timestamp: number } | null = null;
+const CACHE_DURATION = 5 * 60 * 1000; // 5-minute cache
+
+export async function fetchOpenRouterModels(): Promise<OpenRouterModel[]> {
+    const now = Date.now();
+
+    // Serve from the cache while it is still fresh
+    if (modelsCache && (now - modelsCache.timestamp) < CACHE_DURATION) {
+        return modelsCache.models;
+    }
+
+    try {
+        const response = await fetch('https://openrouter.ai/api/v1/models');
+
+        if (!response.ok) {
+            throw new Error(`HTTP error! status: ${response.status}`);
+        }
+
+        const data: OpenRouterModelsResponse = await response.json();
+
+        const models = data.data.map(model => ({
+            id: model.id,
+            name: model.name,
+            description: model.description,
+            context_length: model.context_length,
+            pricing: model.pricing,
+            architecture: model.architecture,
+            supported_parameters: model.supported_parameters
+        }));
+
+        // Refresh the cache
+        modelsCache = {
+            models,
+            timestamp: now
+        };
+
+        console.log(`Fetched ${models.length} OpenRouter models`);
+        return models;
+    } catch (error) {
+        console.error('Failed to fetch OpenRouter models:', error);
+        // Fall back to the cached models (if any) or an empty array
+        return modelsCache?.models || [];
+    }
+}
+
+export async function getOpenRouterModelsByCategory(category?: string): Promise<OpenRouterModel[]> {
+    try {
+        const url = category
+            ? `https://openrouter.ai/api/v1/models?category=${encodeURIComponent(category)}`
+            : 'https://openrouter.ai/api/v1/models';
+
+        const response = await fetch(url);
+
+        if (!response.ok) {
+            throw new Error(`HTTP error! status: ${response.status}`);
+        }
+
+        const data: OpenRouterModelsResponse = await response.json();
+        return data.data;
+    } catch (error) {
+        console.error(`Failed to fetch OpenRouter models for category ${category}:`, error);
+        return [];
+    }
+}
+
+// Clear the model cache
+export function clearOpenRouterCache(): void {
+    modelsCache = null;
+}
+
+// Simplified model info for dropdown display
+export function getSimplifiedModels(models: OpenRouterModel[]): { id: string; name: string; pricing?: string }[] {
+    return models.map(model => ({
+        id: model.id,
+        name: model.name,
+        pricing: model.pricing ? `$${model.pricing.prompt}/1K` : undefined
+    }));
+}
\ No newline at end of file
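
A brief usage sketch for the module above (illustrative; the controller changes below wire it up the same way):

```typescript
import { fetchOpenRouterModels, getSimplifiedModels } from './openrouter';

// The first call hits the network; repeat calls within 5 minutes are served from cache.
const models = await fetchOpenRouterModels();
const options = getSimplifiedModels(models); // [{ id, name, pricing? }] for dropdowns
console.log(`Loaded ${options.length} models, e.g. ${options[0]?.name}`);
```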
diff --git a/service/src/llm/llm.controller.ts b/service/src/llm/llm.controller.ts
index 6eff52e..b0f1a13 100644
--- a/service/src/llm/llm.controller.ts
+++ b/service/src/llm/llm.controller.ts
@@ -6,6 +6,7 @@ import { getClient } from "../mcp/connect.service.js";
 import { abortMessageService, streamingChatCompletion } from "./llm.service.js";
 import { OpenAI } from "openai";
 import { axiosFetch } from "src/hook/axios-fetch.js";
+import { fetchOpenRouterModels, getSimplifiedModels } from "../hook/openrouter.js";
 
 export class LlmController {
     @Controller('llm/chat/completions')
@@ -57,4 +58,66 @@
             msg: models.data
         }
     }
+
+    @Controller('llm/models/openrouter')
+    async getOpenRouterModels(data: RequestData, webview: PostMessageble) {
+        try {
+            const models = await fetchOpenRouterModels();
+            const simplifiedModels = getSimplifiedModels(models);
+
+            // Convert to the standard format used by the other model APIs
+            const standardModels = simplifiedModels.map(model => ({
+                id: model.id,
+                object: 'model',
+                name: model.name,
+                pricing: model.pricing
+            }));
+
+            return {
+                code: 200,
+                msg: standardModels
+            };
+        } catch (error) {
+            console.error('Failed to fetch OpenRouter models:', error);
+            return {
+                code: 500,
+                msg: `Failed to fetch OpenRouter models: ${error instanceof Error ? error.message : String(error)}`
+            };
+        }
+    }
+
+    @Controller('llm/models/dynamic')
+    async getDynamicModels(data: RequestData, webview: PostMessageble) {
+        const { providerId } = data;
+
+        try {
+            if (providerId === 'openrouter') {
+                const models = await fetchOpenRouterModels();
+                const simplifiedModels = getSimplifiedModels(models);
+
+                const standardModels = simplifiedModels.map(model => ({
+                    id: model.id,
+                    object: 'model',
+                    name: model.name,
+                    pricing: model.pricing
+                }));
+
+                return {
+                    code: 200,
+                    msg: standardModels
+                };
+            } else {
+                return {
+                    code: 400,
+                    msg: `Unsupported dynamic provider: ${providerId}`
+                };
+            }
+        } catch (error) {
+            console.error(`Failed to fetch dynamic models for ${providerId}:`, error);
+            return {
+                code: 500,
+                msg: `Failed to fetch models: ${error instanceof Error ? error.message : String(error)}`
+            };
+        }
+    }
 }
\ No newline at end of file
diff --git a/service/src/llm/llm.service.ts b/service/src/llm/llm.service.ts
index f6dd53f..a132fab 100644
--- a/service/src/llm/llm.service.ts
+++ b/service/src/llm/llm.service.ts
@@ -35,9 +35,17 @@
     });
 
+    // Build OpenRouter-specific request headers
+    const defaultHeaders: Record<string, string> = {};
+    if (baseURL && baseURL.includes('openrouter.ai')) {
+        defaultHeaders['HTTP-Referer'] = 'https://github.com/openmcp/openmcp-client';
+        defaultHeaders['X-Title'] = 'OpenMCP Client';
+    }
+
     const client = new OpenAI({
         baseURL,
         apiKey,
+        defaultHeaders: Object.keys(defaultHeaders).length > 0 ? defaultHeaders : undefined
     });
 
     const seriableTools = (tools.length === 0) ? undefined : tools;
diff --git a/service/src/setting/setting.service.ts b/service/src/setting/setting.service.ts
index bf04fc9..40233d3 100644
--- a/service/src/setting/setting.service.ts
+++ b/service/src/setting/setting.service.ts
@@ -53,9 +53,28 @@ export function loadSetting(): IConfig {
     try {
         const configData = fs.readFileSync(configPath, 'utf-8');
         const config = JSON.parse(configData) as IConfig;
+
         if (!config.LLM_INFO || (Array.isArray(config.LLM_INFO) && config.LLM_INFO.length === 0)) {
             config.LLM_INFO = llms;
-        }
+        } else {
+            // Auto-sync new providers: add any providers from the default config that are missing from the user's config
+            const existingIds = new Set(config.LLM_INFO.map((llm: any) => llm.id));
+            const newProviders = llms.filter((llm: any) => !existingIds.has(llm.id));
+
+            if (newProviders.length > 0) {
+                console.log(`Adding ${newProviders.length} new providers:`, newProviders.map(p => p.name));
+                config.LLM_INFO.push(...newProviders);
+
+                // Automatically persist the updated configuration
+                try {
+                    fs.writeFileSync(configPath, JSON.stringify(config, null, 2), 'utf-8');
+                    console.log('Configuration updated with new providers');
+                } catch (saveError) {
+                    console.error('Failed to save updated configuration:', saveError);
+                }
+            }
+        }
+
         return config;
     } catch (error) {
         console.error('Error loading config file, creating new one:', error);