Update documentation homepage to English; complete English translation

锦恢 2025-06-11 19:34:17 +08:00
parent 73efaa027a
commit bceb242747
145 changed files with 1382 additions and 1486 deletions

BIN
.DS_Store vendored


BIN
.vitepress/.DS_Store vendored Normal file


View File

@ -72,16 +72,16 @@ export default withMermaid({
locales: {
root: {
label: '中文',
lang: 'zh',
themeConfig: zhConfig
},
en: {
label: 'English',
lang: 'en',
link: '/en/',
themeConfig: enConfig
},
zh: {
label: '简体中文',
lang: 'zh',
link: '/zh/',
themeConfig: zhConfig
},
ja: {
label: '日本語',
lang: 'ja',

View File

@ -10,7 +10,7 @@ export default {
title: 'Introduction',
description: 'What you need to know about MCP and OpenMCP...',
icon: 'openmcp',
link: '/en/plugin-tutorial/'
link: '/plugin-tutorial/'
}
},
{
@ -19,7 +19,7 @@ export default {
title: 'Quick Start',
description: 'Understand the basic concept of OpenMCP through an example',
icon: 'quick-fill',
link: '/en/plugin-tutorial/quick-start/'
link: '/plugin-tutorial/quick-start/'
}
},
{
@ -28,7 +28,7 @@ export default {
title: 'OpenMCP User Guide',
description: 'Basic usage of the OpenMCP Client',
icon: 'shiyongshouce',
link: '/en/plugin-tutorial/usage/connect-mcp'
link: '/plugin-tutorial/usage/connect-mcp'
}
},
{
@ -37,7 +37,7 @@ export default {
title: 'MCP Server Examples',
description: 'Examples of MCP servers developed in various languages and models',
icon: 'yibangonggongyusuan',
link: '/en/plugin-tutorial/examples/mcp-examples'
link: '/plugin-tutorial/examples/mcp-examples'
}
},
{
@ -46,13 +46,13 @@ export default {
title: 'FAQ',
description: 'Answers to your questions and solutions to your problems',
icon: 'yijianchuli',
link: '/en/plugin-tutorial/faq/help'
link: '/plugin-tutorial/faq/help'
}
},
]
},
{ text: 'SDK', link: '/en/sdk-tutorial/' },
{ text: 'SDK', link: '/sdk-tutorial/' },
{
text: 'More',
items: [
@ -62,7 +62,7 @@ export default {
title: 'Changelog',
description: 'View the project update history',
icon: 'a-yusuan2',
link: '/en/preview/changelog'
link: '/preview/changelog'
}
},
{
@ -71,7 +71,7 @@ export default {
title: 'Join OpenMCP',
description: 'Learn how to participate in the development and maintenance of OpenMCP',
icon: 'shujuzhongxin',
link: '/en/preview/join'
link: '/preview/join'
}
},
{
@ -80,7 +80,7 @@ export default {
title: 'OpenMCP Contributors',
description: 'About the people who contributed to OpenMCP',
icon: 'heike',
link: '/en/preview/contributors'
link: '/preview/contributors'
}
},
{
@ -89,78 +89,78 @@ export default {
title: 'Resource Channel',
description: 'Access resources and information related to the project',
icon: 'xinxiang',
link: '/en/preview/channel'
link: '/preview/channel'
}
}
]
}
],
sidebar: {
'/en/plugin-tutorial/': [
'/plugin-tutorial/': [
{
text: 'Overview',
items: [
{ text: 'Introduction to OpenMCP', link: '/en/plugin-tutorial/index' },
{ text: 'What is MCP?', link: '/en/plugin-tutorial/what-is-mcp' },
{ text: 'Basic Concepts of MCP', link: '/en/plugin-tutorial/concept' }
{ text: 'Introduction to OpenMCP', link: '/plugin-tutorial/index' },
{ text: 'What is MCP?', link: '/plugin-tutorial/what-is-mcp' },
{ text: 'Basic Concepts of MCP', link: '/plugin-tutorial/concept' }
]
},
{
text: 'Quick Start',
items: [
{ text: 'Quick Start', link: '/en/plugin-tutorial/quick-start' },
{ text: 'Install OpenMCP', link: '/en/plugin-tutorial/quick-start/acquire-openmcp' },
{ text: 'Your First MCP', link: '/en/plugin-tutorial/quick-start/first-mcp' },
{ text: 'Quick Debugging of MCP', link: '/en/plugin-tutorial/quick-start/quick-debug' },
{ text: 'Throw it into an LLM and test it!', link: '/en/plugin-tutorial/quick-start/put-into-llm' }
{ text: 'Quick Start', link: '/plugin-tutorial/quick-start' },
{ text: 'Install OpenMCP', link: '/plugin-tutorial/quick-start/acquire-openmcp' },
{ text: 'Your First MCP', link: '/plugin-tutorial/quick-start/first-mcp' },
{ text: 'Quick Debugging of MCP', link: '/plugin-tutorial/quick-start/quick-debug' },
{ text: 'Throw it into an LLM and test it!', link: '/plugin-tutorial/quick-start/put-into-llm' }
]
},
{
text: 'User Guide',
items: [
{ text: 'UI Color Settings', link: '/en/plugin-tutorial/usage/ui-color' },
{ text: 'Connect to MCP Server', link: '/en/plugin-tutorial/usage/connect-mcp' },
{ text: 'Debug tools, resources, and prompts', link: '/en/plugin-tutorial/usage/debug' },
{ text: 'Connect to LLM', link: '/en/plugin-tutorial/usage/connect-llm' },
{ text: 'Test Your MCP with an LLM', link: '/en/plugin-tutorial/usage/test-with-llm' },
{ text: 'Connect to Multiple MCP Services', link: '/en/plugin-tutorial/usage/multi-server' },
{ text: 'Distribute Experiment Results', link: '/en/plugin-tutorial/usage/distribute-result' },
{ text: 'Implement SSE Authenticator', link: '/en/plugin-tutorial/usage/sse-oauth2' }
{ text: 'UI Color Settings', link: '/plugin-tutorial/usage/ui-color' },
{ text: 'Connect to MCP Server', link: '/plugin-tutorial/usage/connect-mcp' },
{ text: 'Debug tools, resources, and prompts', link: '/plugin-tutorial/usage/debug' },
{ text: 'Connect to LLM', link: '/plugin-tutorial/usage/connect-llm' },
{ text: 'Test Your MCP with an LLM', link: '/plugin-tutorial/usage/test-with-llm' },
{ text: 'Connect to Multiple MCP Services', link: '/plugin-tutorial/usage/multi-server' },
{ text: 'Distribute Experiment Results', link: '/plugin-tutorial/usage/distribute-result' },
{ text: 'Implement SSE Authenticator', link: '/plugin-tutorial/usage/sse-oauth2' }
]
},
{
text: 'Development Examples',
items: [
{ text: 'MCP Server Development Examples', link: '/en/plugin-tutorial/examples/mcp-examples' },
{ text: 'Example 1: Weather Info MCP in Python (STDIO)', link: '/en/plugin-tutorial/examples/python-simple-stdio' },
{ text: 'Example 2: Read-only Neo4j MCP in Go (SSE)', link: '/en/plugin-tutorial/examples/go-neo4j-sse' },
{ text: 'Example 3: Read-only Document DB MCP in Java (HTTP)', link: '/en/plugin-tutorial/examples/java-es-http' },
{ text: 'Example 4: Super Web Crawler MCP in TypeScript using crawl4ai (STDIO)', link: '/en/plugin-tutorial/examples/typescript-crawl4ai-stdio' },
{ text: 'Example 5: Generic Form Filling MCP in Python (STDIO)', link: '/en/plugin-tutorial/examples/python-form-stdio' },
{ text: 'Example 6: Blender-based MCP in Python (STDIO)', link: '/en/plugin-tutorial/examples/python-blender-stdio' },
{ text: 'Example 7: Cadence EDA MCP in Python (STDIO)', link: '/en/plugin-tutorial/examples/python-cadence-stdio' }
{ text: 'MCP Server Development Examples', link: '/plugin-tutorial/examples/mcp-examples' },
{ text: 'Example 1: Weather Info MCP in Python (STDIO)', link: '/plugin-tutorial/examples/python-simple-stdio' },
{ text: 'Example 2: Read-only Neo4j MCP in Go (SSE)', link: '/plugin-tutorial/examples/go-neo4j-sse' },
{ text: 'Example 3: Read-only Document DB MCP in Java (HTTP)', link: '/plugin-tutorial/examples/java-es-http' },
{ text: 'Example 4: Super Web Crawler MCP in TypeScript using crawl4ai (STDIO)', link: '/plugin-tutorial/examples/typescript-crawl4ai-stdio' },
{ text: 'Example 5: Generic Form Filling MCP in Python (STDIO)', link: '/plugin-tutorial/examples/python-form-stdio' },
{ text: 'Example 6: Blender-based MCP in Python (STDIO)', link: '/plugin-tutorial/examples/python-blender-stdio' },
{ text: 'Example 7: Cadence EDA MCP in Python (STDIO)', link: '/plugin-tutorial/examples/python-cadence-stdio' }
]
},
{
text: 'FAQ',
items: [
{ text: 'Help', link: '/en/plugin-tutorial/faq/help' }
{ text: 'Help', link: '/plugin-tutorial/faq/help' }
]
}
],
'/en/sdk-tutorial/': [
'/sdk-tutorial/': [
{
text: 'Overview',
items: [
{ text: 'openmcpsdk.js', link: '/en/sdk-tutorial/' }
{ text: 'openmcpsdk.js', link: '/sdk-tutorial/' }
]
},
{
text: 'Basic Usage',
items: [
{ text: 'Simplest Conversation', link: '/en/sdk-tutorial/usage/greet' },
{ text: 'Task Loop', link: '/en/sdk-tutorial/usage/task-loop' },
{ text: 'Multiple Server Connections', link: '/en/sdk-tutorial/usage/multi-server' }
{ text: 'Simplest Conversation', link: '/sdk-tutorial/usage/greet' },
{ text: 'Task Loop', link: '/sdk-tutorial/usage/task-loop' },
{ text: 'Multiple Server Connections', link: '/sdk-tutorial/usage/multi-server' }
]
}
]

View File

@ -1,6 +1,6 @@
export default {
nav: [
{ text: 'ホーム', link: '/' },
{ text: 'ホーム', link: '/ja/' },
{
text: 'チュートリアル',
items: [
@ -50,7 +50,6 @@ export default {
}
},
]
},
{ text: 'SDK', link: '/ja/sdk-tutorial/' },
{

View File

@ -1,6 +1,6 @@
export default {
nav: [
{ text: '首页', link: '/' },
{ text: '首页', link: '/zh/' },
{
text: '教程',
items: [
@ -10,7 +10,7 @@ export default {
title: '简介',
description: '关于 mcp 和 openmcp阁下需要知道的 ...',
icon: 'openmcp',
link: '/plugin-tutorial/'
link: '/zh/plugin-tutorial/'
}
},
{
@ -19,7 +19,7 @@ export default {
title: '快速开始',
description: '通过一个例子快速了解 OpenMCP 的基本概念',
icon: 'quick-fill',
link: '/plugin-tutorial/quick-start/'
link: '/zh/plugin-tutorial/quick-start/'
}
},
{
@ -28,7 +28,7 @@ export default {
title: 'OpenMCP 使用手册',
description: 'OpenMCP Client 的基本使用',
icon: 'shiyongshouce',
link: '/plugin-tutorial/usage/connect-mcp'
link: '/zh/plugin-tutorial/usage/connect-mcp'
}
},
{
@ -37,7 +37,7 @@ export default {
title: 'MCP 服务器开发案例',
description: '使用不同语言开发的不同模式的 MCP 服务器',
icon: 'yibangonggongyusuan',
link: '/plugin-tutorial/examples/mcp-examples'
link: '/zh/plugin-tutorial/examples/mcp-examples'
}
},
{
@ -46,12 +46,12 @@ export default {
title: 'FAQ',
description: '为您答疑解惑,排忧解难',
icon: 'yijianchuli',
link: '/plugin-tutorial/faq/help'
link: '/zh/plugin-tutorial/faq/help'
}
},
]
},
{ text: 'SDK', link: '/sdk-tutorial/' },
{ text: 'SDK', link: '/zh/sdk-tutorial/' },
{
text: '更多',
items: [
@ -61,7 +61,7 @@ export default {
title: '更新日志',
description: '查看项目的更新历史记录',
icon: 'a-yusuan2',
link: '/preview/changelog'
link: '/zh/preview/changelog'
}
},
{
@ -70,7 +70,7 @@ export default {
title: '参与 OpenMCP',
description: '了解如何参与 OpenMCP 项目的开发和维护',
icon: 'shujuzhongxin',
link: '/preview/join'
link: '/zh/preview/join'
}
},
{
@ -79,7 +79,7 @@ export default {
title: 'OpenMCP 贡献者列表',
description: '关于参与 OpenMCP 的贡献者们',
icon: 'heike',
link: '/preview/contributors'
link: '/zh/preview/contributors'
}
},
{
@ -88,78 +88,78 @@ export default {
title: '资源频道',
description: '获取项目相关的资源和信息',
icon: 'xinxiang',
link: '/preview/channel'
link: '/zh/preview/channel'
}
}
]
},
],
sidebar: {
'/plugin-tutorial/': [
'/zh/plugin-tutorial/': [
{
text: '简介',
items: [
{ text: 'OpenMCP 概述', link: '/plugin-tutorial/index' },
{ text: '什么是 MCP', link: '/plugin-tutorial/what-is-mcp' },
{ text: 'MCP 基础概念', link: '/plugin-tutorial/concept' },
{ text: 'OpenMCP 概述', link: '/zh/plugin-tutorial/index' },
{ text: '什么是 MCP', link: '/zh/plugin-tutorial/what-is-mcp' },
{ text: 'MCP 基础概念', link: '/zh/plugin-tutorial/concept' },
]
},
{
text: '快速开始',
items: [
{ text: '快速开始', link: '/plugin-tutorial/quick-start' },
{ text: '安装 OpenMCP', link: '/plugin-tutorial/quick-start/acquire-openmcp' },
{ text: '你的第一个 MCP', link: '/plugin-tutorial/quick-start/first-mcp' },
{ text: '快速调试 MCP', link: '/plugin-tutorial/quick-start/quick-debug' },
{ text: '扔进大模型里面测测好坏!', link: '/plugin-tutorial/quick-start/put-into-llm' },
{ text: '快速开始', link: '/zh/plugin-tutorial/quick-start' },
{ text: '安装 OpenMCP', link: '/zh/plugin-tutorial/quick-start/acquire-openmcp' },
{ text: '你的第一个 MCP', link: '/zh/plugin-tutorial/quick-start/first-mcp' },
{ text: '快速调试 MCP', link: '/zh/plugin-tutorial/quick-start/quick-debug' },
{ text: '扔进大模型里面测测好坏!', link: '/zh/plugin-tutorial/quick-start/put-into-llm' },
]
},
{
text: '使用手册',
items: [
{ text: 'UI 配色', link: '/plugin-tutorial/usage/ui-color' },
{ text: '连接 MCP 服务器', link: '/plugin-tutorial/usage/connect-mcp' },
{ text: '调试 tools, resources 和 prompts', link: '/plugin-tutorial/usage/debug' },
{ text: '连接大模型', link: '/plugin-tutorial/usage/connect-llm' },
{ text: '用大模型测试你的 MCP', link: '/plugin-tutorial/usage/test-with-llm' },
{ text: '连接多个 MCP 服务', link: '/plugin-tutorial/usage/multi-server' },
{ text: '分发实验结果', link: '/plugin-tutorial/usage/distribute-result' },
{ text: 'SSE 鉴权器实现', link: '/plugin-tutorial/usage/sse-oauth2' },
{ text: 'UI 配色', link: '/zh/plugin-tutorial/usage/ui-color' },
{ text: '连接 MCP 服务器', link: '/zh/plugin-tutorial/usage/connect-mcp' },
{ text: '调试 tools, resources 和 prompts', link: '/zh/plugin-tutorial/usage/debug' },
{ text: '连接大模型', link: '/zh/plugin-tutorial/usage/connect-llm' },
{ text: '用大模型测试你的 MCP', link: '/zh/plugin-tutorial/usage/test-with-llm' },
{ text: '连接多个 MCP 服务', link: '/zh/plugin-tutorial/usage/multi-server' },
{ text: '分发实验结果', link: '/zh/plugin-tutorial/usage/distribute-result' },
{ text: 'SSE 鉴权器实现', link: '/zh/plugin-tutorial/usage/sse-oauth2' },
]
},
{
text: '开发案例',
items: [
{ text: 'MCP 服务器开发案例', link: '/plugin-tutorial/examples/mcp-examples' },
{ text: '例子 1. python 实现天气信息 mcp 服务器 (STDIO)', link: '/plugin-tutorial/examples/python-simple-stdio' },
{ text: '例子 2. go 实现 neo4j 的只读 mcp 服务器 (SSE)', link: '/plugin-tutorial/examples/go-neo4j-sse' },
{ text: '例子 3. java 实现文档数据库的只读 mcp (HTTP)', link: '/plugin-tutorial/examples/java-es-http' },
{ text: '例子 4. typescript 实现基于 crawl4ai 的超级网页爬虫 mcp (STDIO)', link: '/plugin-tutorial/examples/typescript-crawl4ai-stdio' },
{ text: '例子 5. python 实现进行通用表单填充 的 mcp (STDIO)', link: '/plugin-tutorial/examples/python-form-stdio' },
{ text: '例子 6. python 实现基于 blender 的 mcp (STDIO)', link: '/plugin-tutorial/examples/python-blender-stdio' },
{ text: '例子 7. python 实现 cadence EDA 的 mcp (STDIO)', link: '/plugin-tutorial/examples/python-cadence-stdio' },
{ text: 'MCP 服务器开发案例', link: '/zh/plugin-tutorial/examples/mcp-examples' },
{ text: '例子 1. python 实现天气信息 mcp 服务器 (STDIO)', link: '/zh/plugin-tutorial/examples/python-simple-stdio' },
{ text: '例子 2. go 实现 neo4j 的只读 mcp 服务器 (SSE)', link: '/zh/plugin-tutorial/examples/go-neo4j-sse' },
{ text: '例子 3. java 实现文档数据库的只读 mcp (HTTP)', link: '/zh/plugin-tutorial/examples/java-es-http' },
{ text: '例子 4. typescript 实现基于 crawl4ai 的超级网页爬虫 mcp (STDIO)', link: '/zh/plugin-tutorial/examples/typescript-crawl4ai-stdio' },
{ text: '例子 5. python 实现进行通用表单填充 的 mcp (STDIO)', link: '/zh/plugin-tutorial/examples/python-form-stdio' },
{ text: '例子 6. python 实现基于 blender 的 mcp (STDIO)', link: '/zh/plugin-tutorial/examples/python-blender-stdio' },
{ text: '例子 7. python 实现 cadence EDA 的 mcp (STDIO)', link: '/zh/plugin-tutorial/examples/python-cadence-stdio' },
]
},
{
text: 'FAQ',
items: [
{ text: '帮助', link: '/plugin-tutorial/faq/help' },
{ text: '帮助', link: '/zh/plugin-tutorial/faq/help' },
]
}
],
'/sdk-tutorial/': [
'/zh/sdk-tutorial/': [
{
text: '简介',
items: [
{ text: 'openmcpsdk.js', link: '/sdk-tutorial/' },
{ text: 'openmcpsdk.js', link: '/zh/sdk-tutorial/' },
]
},
{
text: '基本使用',
items: [
{ text: '最简单的对话', link: '/sdk-tutorial/usage/greet' },
{ text: '任务循环', link: '/sdk-tutorial/usage/task-loop' },
{ text: '多服务器连接', link: '/sdk-tutorial/usage/multi-server' },
{ text: '最简单的对话', link: '/zh/sdk-tutorial/usage/greet' },
{ text: '任务循环', link: '/zh/sdk-tutorial/usage/task-loop' },
{ text: '多服务器连接', link: '/zh/sdk-tutorial/usage/multi-server' },
]
}
]

BIN
.vitepress/theme/.DS_Store vendored Normal file


View File

@ -0,0 +1,95 @@
@font-face {
font-family: "iconfont"; /* Project id 4933953 */
src: url('iconfont.woff2?t=1748520354582') format('woff2'),
url('iconfont.woff?t=1748520354582') format('woff'),
url('iconfont.ttf?t=1748520354582') format('truetype');
}
.iconfont {
font-family: "iconfont" !important;
font-style: normal;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
}
.icon-quick-fill:before {
content: "\e863";
}
.icon-heike:before {
content: "\e6c5";
}
.icon-bumendongtai:before {
content: "\e61f";
}
.icon-duzhengyishi:before {
content: "\e620";
}
.icon-lianwangzhongxin:before {
content: "\e621";
}
.icon-fenxitongji:before {
content: "\e622";
}
.icon-shujuzhongxin:before {
content: "\e623";
}
.icon-shuju:before {
content: "\e624";
}
.icon-shenji:before {
content: "\e625";
}
.icon-yusuan:before {
content: "\e626";
}
.icon-yibangonggongyusuan:before {
content: "\e627";
}
.icon-xinxiang:before {
content: "\e628";
}
.icon-yujing:before {
content: "\e629";
}
.icon-yijianchuli:before {
content: "\e62a";
}
.icon-zhuanti:before {
content: "\e62b";
}
.icon-a-yusuan2:before {
content: "\e62c";
}
.icon-yujuesuanshencha:before {
content: "\e62d";
}
.icon-zhengcefagui:before {
content: "\e62e";
}
.icon-ziliao:before {
content: "\e62f";
}
.icon-zixuntousu:before {
content: "\e630";
}


BIN
.vscode/.DS_Store vendored Normal file


View File

@ -1,127 +0,0 @@
---
# https://vitepress.dev/reference/default-theme-home-page
layout: home
hero:
name: "OpenMCP"
text: "MCP Debugger and SDK for Elegant Developers"
tagline: Bridge the last mile from large language models to intelligent agents
actions:
- theme: brand
text: OpenMCP Plugin
link: ./plugin-tutorial
- theme: alt
text: openmcp-sdk
link: ./sdk-tutorial
- theme: alt
text: GitHub
link: https://github.com/LSTM-Kirigaya/openmcp-client
features:
- icon:
src: /images/icons/vscode.svg
height: 48px
alt: Integrated Debugging Environment
title: Integrated Debugging Environment
details: Combine the inspector with MCP client functions to achieve seamless development and testing
- icon:
src: /images/icons/openmcp-edge.svg
height: 48px
alt: Provide a complete project-level control panel
title: Comprehensive Project Management
details: Provide a complete project-level control panel for efficient MCP project supervision
- icon:
src: /images/icons/openmcp-sdk.svg
height: 48px
alt: Provide a complete project-level control panel
title: Complete Deployment Solution
details: Deploy tested agents to your application or server via openmcp-sdk
---
<br><br>
<h2 id="home-0">
Resolve Issues in Your MCP Agent Development
<br>
<span>Providing Fun and Convenience for Your MCP Agent Development</span>
</h2>
<div class="bilibili-player-container" style="display:flex; width: 100%; justify-content: center;">
<iframe width="90%" height="580" src="https://www.youtube.com/embed/S7igsEhcLiw?si=6sqvbYJxSRoFS26g" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" referrerpolicy="strict-origin-when-cross-origin" allowfullscreen></iframe>
</div>
<br>
<h2 id="home-1">
Who is OpenMCP for?
<br>
<span>The Development of OpenMCP is for ...</span>
</h2>
<br>
<KTab class="home-tab">
<TwoSideLayout
label="Professional Software Engineers"
:texts="[
'Shift testing left to integrate your development and testing without opening third-party software. It offers extremely rich features.',
'Manage, debug, and test your intelligent agents freely and elegantly on the left-hand panel.',
'Every detail of the large language model calling tool is visible at a glance. You can directly reproduce unsatisfactory calling results with one click.',
'Each conversation will display various performance indicators, facilitating cost management.',
'The system prompt management panel allows you to easily build your intelligent agent applications with MCP servers and system prompts.'
]"
image="./images/openmcp.chatbot.png"
/>
<TwoSideLayout
label="Open - Source Community Enthusiasts"
:texts="[
'Shift testing left to integrate your development and testing without opening third-party software. It offers extremely rich features.',
'OpenMCP is completely open-source. You can not only try this product for free but also join us to realize your creative ideas about agents.',
'The technical details are fully disclosed. You don\'t have to worry about your ideas and tokens being plagiarized.',
'The persistent system prompt management panel enables you to test the system prompts of actual MCP servers for sharing within the community.',
'The details of each test will be 100% under Git version control, making it easy for you to share your test results and reproduce others\' MCP projects at zero cost.'
]"
image="./images/opensource.png"
/>
<TwoSideLayout
label="AI Research Scientists"
:texts="[
'Shift testing left to integrate your development and testing without opening third-party software. It offers extremely rich features.',
'With just a few lines of code, you can quickly turn your scientific research results into MCP servers and connect to any large language model to achieve a user-friendly interface.',
'All experimental data and configuration parameters are automatically included in the Git version management system, ensuring that research results are traceable and reproducible for academic exchanges and paper reproduction.',
'Based on OpenMCP, you can quickly complete your demo and shorten the distance from innovation to implementation.'
]"
image="./images/openmcp.chatbot.png"
/>
</KTab>
<br>
<h2 id="home-2">
FAQ
<br>
<span>Waiting for Your Questions</span>
</h2>
<el-collapse>
<el-collapse-item title="What is OpenMCP suitable for?" name="1">
As its name suggests, OpenMCP is an MCP debugger and SDK for developers, committed to reducing the full-chain development cost of AI agents and the mental burden of developers. Our mission is to create MCP tools that can solve real-life problems and save working time through OpenMCP, or help engineers and research scientists deliver demos more quickly, and make this vision visible to the public.
</el-collapse-item>
<el-collapse-item title="Is OpenMCP free?" name="2">
Yes, OpenMCP is completely open-source. You can not only use this product for free but also join us to realize your creative ideas about agents. The task of OpenMCP is to build an ecosystem around MCP. We believe that MCP development will be a highly customized task in the future, so our current focus is not to rush to create an all-purpose agent, but to steadily build the relevant ecosystem and infrastructure.
</el-collapse-item>
<el-collapse-item title="What is OpenMCP not suitable for?" name="3">
If you try to develop an all-purpose, general AI agent through OpenMCP, you should invest all your money in the research and development of quantum computers instead of visiting this website. Remember, in this era, developing a full-domain general AI agent is likely to be equivalent to telecom fraud.
</el-collapse-item>
<el-collapse-item title="Who is developing OpenMCP?" name="4">
<p>OpenMCP was initially led by LSTM-Kirigaya (Jinhui) for building MCP testing tools related to 3D work. Its main participants include employees from large companies, students majoring in computer-related fields at universities, and some active contributors from the open-source community.</p>
<p>Identity is not important. I'd like to share a quote with you: "Don't tell me if you can do it. Tell me if you like it."</p>
<img src="https://pica.zhimg.com/80/v2-3666e84b2f92bf444a5eb64fb9d08e71_1440w.png" style="max-width: 500px;margin-top:10px;"/>
</el-collapse-item>
<el-collapse-item title="How can I join you or participate in discussions?" name="5">
You can learn how to participate in the maintenance and development of OpenMCP through <a href="https://kirigaya.cn/openmcp/preview/join.html" target="_blank">Participate in OpenMCP</a>, and get our contact information through the <a href="https://kirigaya.cn/openmcp/preview/channel.html" target="_blank">Resource Channel</a>. Currently there are three main communities: the QQ group (782833642), the <a href="https://discord.com/invite/SKTZRf6NzU" target="_blank">OpenMCP Discord Channel</a>, and the <a href="https://www.zhihu.com/ring/host/1911121615279849840" target="_blank">Zhihu Circle [OpenMCP Museum]</a>.
</el-collapse-item>
<el-collapse-item title="How to contact us for cooperation?" name="6">
For cooperation, please contact Jinhui at his personal email: 1193466151@qq.com
</el-collapse-item>
</el-collapse>

BIN
images/.DS_Store vendored


View File

(binary image file; 62 KiB before and after)

BIN
images/icons/.DS_Store vendored Normal file


9
images/icons/ai.svg Normal file
View File

@ -0,0 +1,9 @@
<svg t="1748332079467" class="icon" viewBox="0 0 1024 1024" version="1.1"
xmlns="http://www.w3.org/2000/svg" p-id="1357" width="200" height="200">
<path
d="M490.08 116.288L868.736 928.32a32 32 0 1 0 58.016-27.072L548.064 89.28a32 32 0 1 0-57.984 27.04z"
fill="var(--vp-c-brand-1)" p-id="1358"></path>
<path
d="M550.176 116.288L171.488 928.32a32 32 0 1 1-57.984-27.072L492.16 89.28a32 32 0 0 1 58.016 27.04z"
fill="var(--vp-c-text-1)" p-id="1359"></path>
</svg>


9
images/icons/group.svg Normal file
View File

@ -0,0 +1,9 @@
<svg t="1748332003112" class="icon" viewBox="0 0 1024 1024" version="1.1"
xmlns="http://www.w3.org/2000/svg" p-id="1198" width="200" height="200">
<path
d="M82.784 773.76l409.152 175.36c23.36 10.016 49.824 10.016 73.184 0l409.152-175.36a30.976 30.976 0 0 0-24.384-56.96l-409.152 175.36a30.976 30.976 0 0 1-24.416 0L107.2 716.8a30.976 30.976 0 1 0-24.384 56.96z"
fill="var(--vp-c-brand-1)" p-id="1199"></path>
<path
d="M949.888 469.088a30.976 30.976 0 1 1 24.384 56.928l-409.152 175.36a92.896 92.896 0 0 1-73.184 0l-409.152-175.36a30.976 30.976 0 0 1 24.384-56.96l409.152 175.36a30.976 30.976 0 0 0 24.416 0zM539.008 65.504l4.032 1.408L969.824 245.76a38.016 38.016 0 0 1 0 70.048L543.04 494.624c-9.28 3.904-19.744 3.904-29.024 0L87.232 315.84a38.016 38.016 0 0 1 0-70.08l426.784-178.816c7.968-3.328 16.768-3.808 24.96-1.408zM528.48 128L163.904 280.768l364.608 152.8 364.64-152.8L528.512 128z"
fill="var(--vp-c-text-1)" p-id="1200"></path>
</svg>


BIN
images/icons/image.png Normal file



9
images/icons/monitor.svg Normal file
View File

@ -0,0 +1,9 @@
<svg t="1748331856103" class="icon" viewBox="0 0 1024 1024" version="1.1"
xmlns="http://www.w3.org/2000/svg" p-id="986" width="200" height="200">
<path
d="M786.56 887.744a28.896 28.896 0 0 1 0 57.792H208.544a28.896 28.896 0 0 1 0-57.792zM844.416 64a86.72 86.72 0 0 1 86.72 86.72V699.84a86.72 86.72 0 0 1-86.72 86.72H150.72A86.72 86.72 0 0 1 64 699.84V150.72A86.72 86.72 0 0 1 150.72 64h693.664z m0 57.792H150.72a28.896 28.896 0 0 0-28.928 28.928V699.84c0 15.968 12.96 28.896 28.928 28.896h693.664a28.896 28.896 0 0 0 28.896-28.896V150.72a28.896 28.896 0 0 0-28.896-28.928z"
fill="var(--vp-c-text-1)" p-id="987"></path>
<path
d="M228.352 548.224l166.592-157.376 172.064 160.48 244.768-235.264a28.896 28.896 0 0 0-40.064-41.664L566.4 471.744l-171.712-160.16-206.016 194.592a28.896 28.896 0 1 0 39.68 42.048z"
fill="var(--vp-c-brand-1)" p-id="988"></path>
</svg>


View File

@ -0,0 +1,76 @@
<?xml version="1.0" encoding="utf-8"?>
<svg width="2545" height="834" viewBox="0 0 2545 834" fill="none" xmlns="http://www.w3.org/2000/svg">
<defs>
<linearGradient id="gradient_1" gradientUnits="userSpaceOnUse" x1="300" y1="0" x2="300" y2="600">
<stop offset="0" stop-color="#A1A7F6" />
<stop offset="1" stop-color="#FFFFFF" stop-opacity="0.2" />
</linearGradient>
<linearGradient id="gradient_2" gradientUnits="userSpaceOnUse" x1="110.5" y1="0" x2="110.5" y2="221">
<stop offset="0.468" stop-color="#BFBAF6" />
<stop offset="1" stop-color="#FFFFFF" />
</linearGradient>
<filter color-interpolation-filters="sRGB" x="-219" y="-219" width="221" height="221" id="filter_3">
<feFlood flood-opacity="0" result="BackgroundImageFix_1" />
<feColorMatrix type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0" in="SourceAlpha" />
<feOffset dx="0" dy="4" />
<feGaussianBlur stdDeviation="2" />
<feColorMatrix type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.251 0" />
<feBlend mode="normal" in2="BackgroundImageFix_1" result="Shadow_2" />
<feBlend mode="normal" in="SourceGraphic" in2="Shadow_2" result="Shape_3" />
</filter>
<linearGradient id="gradient_4" gradientUnits="userSpaceOnUse" x1="55.5" y1="0" x2="55.5" y2="111">
<stop offset="0" stop-color="#FFFFFF" />
<stop offset="1" stop-color="#A8A7F3" />
</linearGradient>
<filter color-interpolation-filters="sRGB" x="-109" y="-109" width="111" height="111" id="filter_5">
<feFlood flood-opacity="0" result="BackgroundImageFix_1" />
<feColorMatrix type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0" in="SourceAlpha" />
<feOffset dx="0" dy="4" />
<feGaussianBlur stdDeviation="2" />
<feColorMatrix type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.251 0" />
<feBlend mode="normal" in2="BackgroundImageFix_1" result="Shadow_2" />
<feBlend mode="normal" in="SourceGraphic" in2="Shadow_2" result="Shape_3" />
</filter>
<linearGradient id="gradient_6" gradientUnits="userSpaceOnUse" x1="182.5" y1="0" x2="182.5" y2="365">
<stop offset="0.382" stop-color="#A594F6" />
<stop offset="1" stop-color="#FFFFFF" />
</linearGradient>
<filter color-interpolation-filters="sRGB" x="-363" y="-363" width="365" height="365" id="filter_7">
<feFlood flood-opacity="0" result="BackgroundImageFix_1" />
<feColorMatrix type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0" in="SourceAlpha" />
<feOffset dx="0" dy="4" />
<feGaussianBlur stdDeviation="2" />
<feColorMatrix type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.251 0" />
<feBlend mode="normal" in2="BackgroundImageFix_1" result="Shadow_2" />
<feBlend mode="normal" in="SourceGraphic" in2="Shadow_2" result="Shape_3" />
</filter>
<linearGradient id="gradient_8" gradientUnits="userSpaceOnUse" x1="57" y1="0" x2="57" y2="114">
<stop offset="0" stop-color="#FFFFFF" />
<stop offset="0.614" stop-color="#C7BAF8" />
</linearGradient>
<filter color-interpolation-filters="sRGB" x="-112" y="-112" width="114" height="114" id="filter_9">
<feFlood flood-opacity="0" result="BackgroundImageFix_1" />
<feColorMatrix type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0" in="SourceAlpha" />
<feOffset dx="0" dy="4" />
<feGaussianBlur stdDeviation="2" />
<feColorMatrix type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.251 0" />
<feBlend mode="normal" in2="BackgroundImageFix_1" result="Shadow_2" />
<feBlend mode="normal" in="SourceGraphic" in2="Shadow_2" result="Shape_3" />
</filter>
</defs>
<g>
<g>
<g transform="translate(145 58)">
<g>
<path d="M300 0C465.708 0 600 134.292 600 300C600 300 600 300 600 300C600 465.708 465.708 600 300 600C300 600 300 600 300 600C134.292 600 0 465.708 0 300C0 300 0 300 0 300C0 134.292 134.292 0 300 0Z" fill="#5A00FF" fill-rule="evenodd" />
<path d="M300 0C465.708 0 600 134.292 600 300C600 300 600 300 600 300C600 465.708 465.708 600 300 600C300 600 300 600 300 600C134.292 600 0 465.708 0 300C0 300 0 300 0 300C0 134.292 134.292 0 300 0Z" fill="url(#gradient_1)" fill-rule="evenodd" />
</g>
<path d="M0 110.5C0 49.4725 49.4725 0 110.5 0C171.527 0 221 49.4725 221 110.5C221 171.527 171.527 221 110.5 221C49.4725 221 0 171.527 0 110.5Z" fill="url(#gradient_2)" fill-rule="evenodd" filter="url(#filter_3)" transform="translate(294 341)" />
<path d="M0 55.5C0 24.8482 24.8482 0 55.5 0C86.1518 0 111 24.8482 111 55.5C111 86.1518 86.1518 111 55.5 111C24.8482 111 0 86.1518 0 55.5Z" fill="url(#gradient_4)" fill-rule="evenodd" filter="url(#filter_5)" transform="translate(48 269)" />
<path d="M0 182.5C0 81.708 81.708 0 182.5 0C283.292 0 365 81.708 365 182.5C365 283.292 283.292 365 182.5 365C81.708 365 0 283.292 0 182.5Z" fill="url(#gradient_6)" fill-rule="evenodd" filter="url(#filter_7)" transform="translate(188 39)" />
<path d="M0 57C0 25.5198 25.5198 0 57 0C88.4802 0 114 25.5198 114 57C114 88.4802 88.4802 114 57 114C25.5198 114 0 88.4802 0 57Z" fill="url(#gradient_8)" fill-rule="evenodd" filter="url(#filter_9)" transform="translate(401 130)" />
</g>
</g>
</g>
</svg>


102
index.md
View File

@ -4,12 +4,12 @@ layout: home
hero:
name: "OpenMCP"
text: "面向优雅开发者的 MCP 调试器和 SDK"
tagline: 缩短从大语言模型到智能体的最后一公里
text: "MCP Debugger and SDK for Elegant Developers"
tagline: Bridge the last mile from large language models to intelligent agents
actions:
- theme: brand
text: OpenMCP 插件
text: OpenMCP Plugin
link: ./plugin-tutorial
- theme: alt
text: openmcp-sdk
@ -21,43 +21,39 @@ features:
- icon:
src: /images/icons/vscode.svg
height: 48px
alt: 集成调试环境
title: 集成调试环境
details: 将检查器与 MCP 客户端功能相结合,实现无缝开发和测试
alt: Integrated Debugging Environment
title: Integrated Debugging Environment
details: Combine the inspector with MCP client functions to achieve seamless development and testing
- icon:
src: /images/icons/openmcp-edge.svg
height: 48px
alt: 提供完整的项目级控制面板
title: 全面的项目管理
details: 提供完整的项目级控制面板,实现高效的 MCP 项目监督
alt: Provide a complete project-level control panel
title: Comprehensive Project Management
details: Provide a complete project-level control panel for efficient MCP project supervision
- icon:
src: /images/icons/openmcp-sdk.svg
height: 48px
alt: 提供完整的项目级控制面板
title: 完整的部署方案
details: 将测试完成的 agent 通过 openmcp-sdk 部署到您的应用或者服务器上
alt: Provide a complete project-level control panel
title: Complete Deployment Solution
details: Deploy tested agents to your application or server via openmcp-sdk
---
<br><br>
<h2 id="home-0">
为您的 MCP Agent 开发排忧解难
Resolve Issues in Your MCP Agent Development
<br>
<span>Providing Fun and Convenience for Your MCP Agent Development</span>
</h2>
<BiliPlayer
url="//player.bilibili.com/player.html?isOutside=true&aid=114654638511901&bvid=BV1MFTBzpEtZ&cid=30412178228&p=1"
cover="https://picx.zhimg.com/80/v2-ed6a7eb80dfeb2f188f11d89ca6c4b5a_1440w.png"
/>
<div class="bilibili-player-container" style="display:flex; width: 100%; justify-content: center;">
<iframe width="90%" height="580" src="https://www.youtube.com/embed/S7igsEhcLiw?si=6sqvbYJxSRoFS26g" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" referrerpolicy="strict-origin-when-cross-origin" allowfullscreen></iframe>
</div>
<br>
<h2 id="home-1">
OpenMCP 为谁准备?
Who is OpenMCP for?
<br>
<span>The Development of OpenMCP is for ...</span>
</h2>
@ -66,34 +62,34 @@ OpenMCP 为谁准备?
<KTab class="home-tab">
<TwoSideLayout
label="专业软件工程师"
label="Professional Software Engineers"
:texts="[
'测试左移,让你的开发与测试一体化,无需打开第三方软件。提供极其丰富的功能和特性。',
'在左侧面板自由而优雅地管理、调试和测试你的智能体。',
'大模型调用工具的每一个细节一览无余,不满意的调用结果直接一键复现。',
'每一次对话都会显示各项性能指标,方便进行成本管理。',
'系统提示词管理面板,让您轻松用 mcp 服务器和系统提示词构建您的智能体应用。',
'Shift testing left to integrate your development and testing without opening third-party software. It offers extremely rich features.',
'Manage, debug, and test your intelligent agents freely and elegantly on the left-hand panel.',
'Every detail of the large language model calling tool is visible at a glance. You can directly reproduce unsatisfactory calling results with one click.',
'Each conversation will display various performance indicators, facilitating cost management.',
'The system prompt management panel allows you to easily build your intelligent agent applications with MCP servers and system prompts.'
]"
image="./images/openmcp.chatbot.png"
/>
<TwoSideLayout
label="开源社区爱好者"
label="Open - Source Community Enthusiasts"
:texts="[
'测试左移,让你的开发与测试一体化,无需打开第三方软件。提供极其丰富的功能和特性。',
'OpenMCP 完全开源,您不仅可以免费试用此产品,也可以一起加入我们,实现你的关于 Agent 的奇思妙想。',
'完全公开技术细节您不必担心您的创意和token会遭到剽窃。',
'可持久化的系统提示词管理面板,让您可以将实际的 mcp 服务器的系统提示词进行测试,以便于在社区内进行分享。',
'每一次测试的细节都会 100% 跟随 git 进行版本控制,方便你分享你的每一次试验结果,也方便你零成本复现别人的 mcp 项目。'
'Shift testing left to integrate your development and testing without opening third-party software. It offers extremely rich features.',
'OpenMCP is completely open-source. You can not only try this product for free but also join us to realize your creative ideas about agents.',
'The technical details are fully disclosed. You don\'t have to worry about your ideas and tokens being plagiarized.',
'The persistent system prompt management panel enables you to test the system prompts of actual MCP servers for sharing within the community.',
'The details of each test will be 100% under Git version control, making it easy for you to share your test results and reproduce others\' MCP projects at zero cost.'
]"
image="./images/opensource.png"
/>
<TwoSideLayout
label="AI研发科学家"
label="AI Research Scientists"
:texts="[
'测试左移,让你的开发与测试一体化,无需打开第三方软件。提供极其丰富的功能和特性。',
'只需几行代码,就能快速将您的科研成果做成 mcp 服务器,从而接入任意大模型,以实现用户友好型的交互界面。',
'所有实验数据与配置参数均自动纳入Git版本管理系统确保研究成果可追溯、可复现便于学术交流与论文复现。',
'基于 OpenMCP 快速完成您的 demo缩短创新到落地的距离。',
'Shift testing left to integrate your development and testing without opening third-party software. It offers extremely rich features.',
'With just a few lines of code, you can quickly turn your scientific research results into MCP servers and connect to any large language model to achieve a user-friendly interface.',
'All experimental data and configuration parameters are automatically included in the Git version management system, ensuring that research results are traceable and reproducible for academic exchanges and paper reproduction.',
'Based on OpenMCP, you can quickly complete your demo and shorten the distance from innovation to implementation.'
]"
image="./images/openmcp.chatbot.png"
/>
@ -102,30 +98,30 @@ OpenMCP 为谁准备?
<br>
<h2 id="home-2">
问题解答
FAQ
<br>
<span>Waiting for Your Questions</span>
</h2>
<el-collapse>
<el-collapse-item title="OpenMCP 适合做什么?" name="1">
正如它的名字一样OpenMCP 是一个面向开发者的 MCP 调试器和 SDK致力于降低 AI Agent 的全链路开发成本和开发人员的心智负担。通过 OpenMCP 制作出可以在真实生活场景中解决问题,缩短工作时间的 mcp 工具,或是让工程师与研发科学家更快地交付 demo并将这份愿景让公众看到是我们的任务和使命。
<el-collapse-item title="What is OpenMCP suitable for?" name="1">
As its name suggests, OpenMCP is an MCP debugger and SDK for developers, committed to reducing the full-chain development cost of AI agents and the mental burden of developers. Our mission is to create MCP tools that can solve real-life problems and save working time through OpenMCP, or help engineers and research scientists deliver demos more quickly, and make this vision visible to the public.
</el-collapse-item>
<el-collapse-item title="OpenMCP 是免费的吗?" name="2">
是的OpenMCP 完全开源,您不仅可以免费使用此产品,也可以一起加入我们,实现你的关于 Agent 的奇思妙想。OpenMCP 的任务是建立起关于 MCP 的生态圈。因为我们认为MCP 的开发在未来一段时间内会是一项高度定制化的工作,所以当前的重点并不是赶紧出做一个看起来什么都能做的 Agent而是步步为营做出相关的生态和基础设施。
<el-collapse-item title="Is OpenMCP free?" name="2">
Yes, OpenMCP is completely open-source. You can not only use this product for free but also join us to realize your creative ideas about agents. The task of OpenMCP is to build an ecosystem around MCP. We believe that MCP development will be a highly customized task in the future, so our current focus is not to rush to create an all-purpose agent, but to steadily build the relevant ecosystem and infrastructure.
</el-collapse-item>
<el-collapse-item title="OpenMCP 不适合做什么?" name="3">
如果你试图通过 OpenMCP 开发一款什么都能做的,通用的 AI Agent你应该做的是把钱全部投资到量子计算机的研发而不是点开这个网站。记住一句话这个时代做全领域通用AI Agent依概率收敛到电信诈骗。
<el-collapse-item title="What is OpenMCP not suitable for?" name="3">
If you try to develop an all-purpose, general AI agent through OpenMCP, you should invest all your money in the research and development of quantum computers instead of visiting this website. Remember, in this era, developing a full-domain general AI agent is likely to be equivalent to telecom fraud.
</el-collapse-item>
<el-collapse-item title="OpenMCP 都是什么人在开发?" name="4">
<p>OpenMCP 是由 LSTM-Kirigaya(锦恢) 最初主导开发的,用于构建 3D 相关工作的 mcp 测试工具。它的主要参与者都是大厂在职员工,高校计算机相关专业的学生、以及一些开源社区的活跃贡献者。</p>
<p>身份不重要,我非常喜欢的一句话,送给阁下:“不要回答我你会不会,回答我,你喜不喜欢”。</p>
<el-collapse-item title="Who is developing OpenMCP?" name="4">
<p>OpenMCP was initially led by LSTM-Kirigaya (Jinhui) for building MCP testing tools related to 3D work. Its main participants include employees from large companies, students majoring in computer-related fields at universities, and some active contributors from the open-source community.</p>
<p>Identity is not important. I'd like to share a quote with you: "Don't tell me if you can do it. Tell me if you like it."</p>
<img src="https://pica.zhimg.com/80/v2-3666e84b2f92bf444a5eb64fb9d08e71_1440w.png" style="max-width: 500px;margin-top:10px;"/>
</el-collapse-item>
<el-collapse-item title="如何加入我们或者参与讨论?" name="5">
您可以通过 <a href="https://kirigaya.cn/openmcp/preview/join.html" target="_blank">参与 OpenMCP</a> 来了解如何参与 OpenMCP 的维护和开发。通过 <a href="https://kirigaya.cn/openmcp/preview/channel.html" target="_blank">资源频道</a> 来获取我们的联系方式。目前主要的社区有三个QQ群782833642 、 <a href="https://discord.com/invite/SKTZRf6NzU" target="_blank">OpenMCP Discord 频道</a> <a href="https://www.zhihu.com/ring/host/1911121615279849840" target="_blank">知乎圈子【OpenMCP 博物馆】</a>
<el-collapse-item title="How can I join you or participate in discussions?" name="5">
You can learn how to participate in the maintenance and development of OpenMCP through <a href="https://kirigaya.cn/openmcp/preview/join.html" target="_blank">Participate in OpenMCP</a>, and get our contact information through the <a href="https://kirigaya.cn/openmcp/preview/channel.html" target="_blank">Resource Channel</a>. Currently there are three main communities: the QQ group (782833642), the <a href="https://discord.com/invite/SKTZRf6NzU" target="_blank">OpenMCP Discord Channel</a>, and the <a href="https://www.zhihu.com/ring/host/1911121615279849840" target="_blank">Zhihu Circle [OpenMCP Museum]</a>.
</el-collapse-item>
<el-collapse-item title="想要合作如何联系我们?" name="6">
合作请联系锦恢的个人邮箱:1193466151@qq.com
<el-collapse-item title="How to contact us for cooperation?" name="6">
For cooperation, please contact Jinhui at his personal email: 1193466151@qq.com
</el-collapse-item>
</el-collapse>
</el-collapse>

View File

@ -1,32 +1,30 @@
# MCP 基础概念
# MCP Basic Concepts
## 前言
## Foreword
In the [[what-is-mcp|previous article]], we briefly introduced the definition of MCP and its basic organizational structure. As developers, what we most need to focus on is how to build MCP servers tailored to our own business and scenarios; once such a server is plugged into any MCP client, it can give the large model the customized interaction capabilities we developed.
Before we officially start teaching you how to develop your own MCP server, I think it might be necessary to clarify a few basic concepts.
在 [[what-is-mcp|之前的文章]] 中,我们简单介绍了 MCP 的定义和它的基本组织结构。作为开发者,我们最需要关注的其实是如何根据我们自己的业务和场景定制化地开发我们需要的 MCP 服务器,这样直接接入任何一个 MCP 客户端后,我们都可以给大模型以我们定制出的交互能力。
## Resources, Prompts, and Tools
在正式开始教大家如何开发自己的 MCP 服务器之前,我想,或许有必要讲清楚几个基本概念。
In the [MCP Client Protocol](https://modelcontextprotocol.io/clients), three very important capability categories in the MCP protocol are mentioned:
## Resources, Prompts 和 Tools
* Resources: Customized requests and access to local resources, which can be file systems, databases, files in the current code editor, etc., essentially **static resources** that web apps cannot access. Additional resources will enrich the context sent to the large model, allowing AI to give us more accurate responses.
* Prompts: Customized prompts that AI can adopt in certain scenarios. For example, if AI needs to return certain formatted content, custom prompts can be provided.
* Tools: Tools available for AI use. These must be functions, such as booking a hotel, opening a webpage, or turning off a lamp—these encapsulated functions can be a tool. The large model will use these tools via function calling. Tools will allow AI to directly operate our computer and even interact with the real world.
在 [MCP 客户端协议](https://modelcontextprotocol.io/clients) 中,讲到了 MCP 协议中三个非常重要的能力类别:
For those with front-end and back-end development experience, you can think of Resources as "read-only permissions granted to the large model" and Tools as "read-write permissions granted to the large model."
- Resouces 定制化地请求和访问本地的资源可以是文件系统、数据库、当前代码编辑器中的文件等等原本网页端的app 无法访问到的 **静态资源**。额外的 resources 会丰富发送给大模型的上下文,使得 AI 给我们更加精准的回答。
- Prompts :定制化一些场景下可供 AI 进行采纳的 prompt比如如果需要 AI 定制化地返回某些格式化内容时,可以提供自定义的 prompts。
- Tools :可供 AI 使用的工具,它必须是一个函数,比如预定酒店、打开网页、关闭台灯这些封装好的函数就可以是一个 tool大模型会通过 function calling 的方式来使用这些 tools。 Tools 将会允许 AI 直接操作我们的电脑,甚至和现实世界发生交互。
MCP clients (such as Claude Desktop, 5ire, etc.) have already implemented the front-end logic for the above. However, what resources and tools to provide is left to each developer's imagination. In other words, we need to develop rich and diverse MCP Servers to enable the large model to perform more interesting tasks.
各位拥有前后端开发经验的朋友们,可以将 Resouces 看成是「额外给予大模型的只读权限」,把 Tools 看成是「额外给予大模型的读写权限」。
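To make these three capability types concrete, here is a minimal sketch of a server that registers one of each, assuming the `FastMCP` interface of the official MCP Python SDK (the server name and functions are illustrative only):

```python
from mcp.server.fastmcp import FastMCP

mcp = FastMCP("demo-server")

# Tool: a function the model can invoke via function calling
@mcp.tool()
def add(a: int, b: int) -> int:
    """Add two numbers."""
    return a + b

# Resource: read-only data addressed by a URI template
@mcp.resource("greeting://{name}")
def get_greeting(name: str) -> str:
    """Return a personalized greeting."""
    return f"Hello, {name}!"

# Prompt: a reusable prompt template the client can hand to the model
@mcp.prompt()
def summarize(text: str) -> str:
    """Ask the model to summarize a piece of text."""
    return f"Please summarize the following text:\n\n{text}"

if __name__ == "__main__":
    mcp.run()  # defaults to the stdio transport
```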
However, one thing to note is that almost all large models today use the OpenAI protocol as the access point through which we reach them. So what is the OpenAI protocol?
MCP 客户端(比如 Claude Desktop5ire 等)已经实现好了上述的前端部分逻辑。而具体提供什么资源,具体提供什么工具,则需要各位玩家充分想象了,也就是我们需要开发丰富多彩的 MCP Server 来允许大模型做出更多有意思的工作。
## OpenAI Protocol
不过需要说明的一点是,目前几乎所有大模型采用了 openai 协议作为我们访问大模型的接入点。什么叫 openai 协议呢?
When developing an app in Python or TypeScript, we often install a library named `openai`, in which you fill in your model vendor, the model's base URL, and the model type to access the large model directly. Every model provider, in turn, must support this library and its protocol.
## openai 协议
当我们使用 python 或者 typescript 开发 app 时,往往会安装一个名为 openai 的库,里面填入你需要使用的模型厂商、模型的基础 url、使用的模型类别来直接访问大模型。而各个大模型提供商也必须支持这个库这套协议。
比如我们在 python 中访问 deepseek 的服务就可以这么做:
For example, to access the Deepseek service in Python, we can do it like this:
```python
from openai import OpenAI
@ -45,7 +43,7 @@ response = client.chat.completions.create(
print(response.choices[0].message.content)
```
如果你点进这个 create 函数去看,你会发现 openai 协议需要大模型厂家支持的 feature 是非常非常多的:
If you go into the `create` function, you will see that the OpenAI protocol requires a lot of features to be supported by the model provider:
```python
@overload
@ -92,13 +90,13 @@ print(response.choices[0].message.content)
) -> ChatCompletion:
```
从上面的签名中,你应该可以看到几个很熟悉的参数,比如 `temperature`, `top_p`,很多的大模型使用软件中,有的会给你暴露这个参数进行调节。比如在 5ire 中,内容随机度就是 `temperature` 这个参数的图形化显示。
From the above signature, you should recognize several familiar parameters, such as `temperature` and `top_p`. Many LLM applications expose these parameters for adjustment. In 5ire, for example, the "content randomness" setting is simply a graphical control for the `temperature` parameter.
<div align=center>
<img src="https://picx.zhimg.com/80/v2-9f8544aa917e8c128fc194adeb7161cd_1440w.png" style="width: 100%;"/>
</div>
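As a quick illustration, these knobs are passed directly to `create`; a minimal sketch reusing the `client` from the snippet above (the parameter values are arbitrary):

```python
response = client.chat.completions.create(
    model="deepseek-chat",
    messages=[{"role": "user", "content": "Hello!"}],
    temperature=0.7,  # higher values make the output more random
    top_p=0.9,        # nucleus sampling threshold
)
```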
其实如你所见,一次普普通通调用涉及到的可调控参数是非常之多的。而在所有参数中,你可以注意到一个参数叫做 `tools`:
As you can see, a simple invocation involves many adjustable parameters. Among all these parameters, you can notice one called `tools`:
```python
@overload
@ -109,14 +107,14 @@ print(response.choices[0].message.content)
model: Union[str, ChatModel],
audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
# 看这里
# Look here
tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
) -> ChatCompletion:
```
## tool_calls 字段
## tool\_calls Field
在上面的 openai 协议中,有一个名为 tools 的参数。 tools 就是要求大模型厂商必须支持 function calling 这个特性,也就是我们提供一部分工具的描述(和 MCP 协议完全兼容的),在 tools 不为空的情况下chat 函数返回的值中会包含一个特殊的字段 `tool_calls`,我们可以运行下面的我写好的让大模型调用可以查询天气的代码:
In the OpenAI protocol above, there is a parameter called `tools`. It requires the model provider to support the function calling feature: we provide descriptions of a set of tools (fully compatible with the MCP protocol), and when `tools` is not empty, the value returned by the chat function includes a special field, `tool_calls`. We can run the following code I wrote, which lets the large model call a weather-query tool:
```python
from openai import OpenAI
@ -126,19 +124,19 @@ client = OpenAI(
base_url="https://api.deepseek.com"
)
# 定义 tools函数/工具列表)
# Define tools (functions/tool list)
tools = [
{
"type": "function",
"function": {
"name": "get_current_weather",
"description": "获取给定地点的天气",
"description": "Get the weather for a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "城市,比如杭州,北京,上海",
"description": "City, e.g., Hangzhou, Beijing, Shanghai",
}
},
"required": ["location"],
@ -150,18 +148,18 @@ tools = [
response = client.chat.completions.create(
model="deepseek-chat",
messages=[
{"role": "system", "content": "你是一个很有用的 AI"},
{"role": "user", "content": "今天杭州的天气是什么?"},
{"role": "system", "content": "You are a helpful AI"},
{"role": "user", "content": "Whats the weather in Hangzhou today?"},
],
tools=tools, # 传入 tools 参数
tool_choice="auto", # 可选:控制是否强制调用某个工具
tools=tools, # Pass the tools parameter
tool_choice="auto", # Optional: control whether to force a specific tool call
stream=False,
)
print(response.choices[0].message)
```
运行上述代码,它的返回如下:
Running the above code produces the following output:
```python
ChatCompletionMessage(
@ -175,7 +173,7 @@ ChatCompletionMessage(
ChatCompletionMessageToolCall(
id='call_0_baeaba2b-739d-40c2-aa6c-1e61c6d7e855',
function=Function(
arguments='{"location":"杭州"}',
arguments='{"location":"Hangzhou"}',
name='get_current_weather'
),
type='function',
@ -185,48 +183,4 @@ ChatCompletionMessage(
)
```
可以看到上面的 `tool_calls` 给出了大模型想要如何去使用我们给出的工具。需要说明的一点是,收到上下文的限制,目前一个问题能够让大模型调取的工具上限一般不会超过 100 个,这个和大模型厂商的上下文大小有关系。奥,对了,友情提示,当你使用 MCP 客户端在使用大模型解决问题时,同一时间激活的 MCP Server 越多,消耗的 token 越多哦 :D
而目前 openai 的协议中tools 是只支持函数类的调用。而函数类的调用往往是可以模拟出 Resources 的效果的。比如取资源,你可以描述为一个 tool。因此在正常情况下如果大家要开发 MCP Server最好只开发 Tools另外两个 feature 还暂时没有得到广泛支持。
## 使用 Inspector 进行调试
Claude 原生提供的 MCP 协议可以通过官方提供的 Inspector 进行调试,对于 [[first-mcp|你的第一个 MCP]] 中的例子,可以如下进行调试,在命令行输入如下命令启动 Inspector:
```bash
mcp dev main.py
```
这会启动一个前端服务器,并打开 `http://localhost:5173/` 后我们可以看到 inspector 的调试界面,先点击左侧的 `Connect` 来运行我们的 server.py 并通过 stdio 为通信管道和 web 建立通信。
Fine可以开始愉快地进行调试了Inspector 主要给了我们三个板块,分别对应 ResourcesPrompts 和 Tools。
先来看 Resources点击「Resource Templates」可以罗列所有注册的 Resource比如我们上文定义的 `get_greeting`,你可以通过输入参数运行来查看这个函数是否正常工作。(因为一般情况下的这个资源协议是会访问远程数据库或者微服务的)
<div align=center>
<img src="https://picx.zhimg.com/80/v2-71fc1ad813cdbf7ecec24d878c343b96_1440w.png" style="width: 100%;"/>
</div>
Prompts 端就比较简单了,直接输入预定义参数就能获取正常的返回结果。
<div align=center>
<img src="https://pic1.zhimg.com/80/v2-4f42899ba1163922ac2347f7cebe5362_1440w.png" style="width: 100%;"/>
</div>
Tools 端将会是我们后面调试的核心。在之前的章节我们讲过了MCP 协议中的 Prompts 和 Resources 目前还没有被 openai 协议和各大 MCP 客户端广泛支持,因此,我们主要的服务端业务都应该是在写 tools。
我们此处提供的 tool 是实现一个简单的加法,它非常简单,我们输入 1 和 2 就可以直接看到结果是 3。我们后续会开发一个可以访问天气预报的 tool那么到时候就非常需要一个这样的窗口来调试我们的天气信息获取是否正常了。
<div align=center>
<img src="https://pic1.zhimg.com/80/v2-4164a900198a70a158ae441f9e441d07_1440w.png" style="width: 100%;"/>
</div>
## 结语
这篇文章,我们简单了解了 MCP 内部的一些基本概念,我认为这些概念对于诸位开发一个 MCP 服务器是大有裨益的,所以我认为有必要先讲一讲。
下面的文章中,我将带领大家探索 MCP 的奇境,一个属于 AI Agent 的时代快要到来了。
As you can see, the `tool_calls` field above shows how the large model wants to use the tools we provided. One thing to note: due to context limitations, the number of tools a large model can invoke for a single question generally cannot exceed about 100, which depends on the size of the provider's context window. Oh, and a friendly reminder: when you use an MCP client to solve problems with a large model, the more MCP Servers you have active at the same time, the more tokens you consume :D

Also, in the current OpenAI protocol, `tools` only supports function-style calls. Function calls, however, can usually simulate the effect of Resources; fetching a resource, for example, can itself be described as a tool. So under normal circumstances, if you want to develop an MCP Server, it is best to develop only Tools, as the other two features are not yet widely supported.
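To illustrate that last point, a read-only resource access can be wrapped as an ordinary tool in the same schema format used above; the `read_article` tool here is a hypothetical example:

```python
# Hypothetical tool that exposes a read-only "resource" via function calling
tools = [
    {
        "type": "function",
        "function": {
            "name": "read_article",
            "description": "Fetch the text of an article by its id (a read-only resource access)",
            "parameters": {
                "type": "object",
                "properties": {
                    "article_id": {
                        "type": "string",
                        "description": "The id of the article to fetch",
                    }
                },
                "required": ["article_id"],
            },
        },
    }
]
```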

View File

@ -1,488 +1,491 @@
# go 实现 neo4j 的只读 mcp 服务器 (SSE)
[本期教程视频](https://www.bilibili.com/video/BV1g8TozyEE7/)
# Implementing a Read-Only MCP Server for Neo4j in Go (SSE)
## 前言
[Video Tutorial](https://www.bilibili.com/video/BV1g8TozyEE7/)
本篇教程,演示一下如何使用 go 语言写一个可以访问 neo4j 数据库的 mcp 服务器。实现完成后,我们不需要写任何 查询代码 就能通过询问大模型了解服务器近况。
## Introduction
不同于之前的连接方式,这次,我们将采用 SSE 的方式来完成服务器的创建和连接。
This tutorial demonstrates how to use Go to create an MCP server that can access a Neo4j database. Once implemented, you can query the server's status via a large language model (LLM) without writing any additional query code.
本期教程的代码https://github.com/LSTM-Kirigaya/openmcp-tutorial/tree/main/neo4j-go-server
Unlike previous connection methods, this time we'll use Server-Sent Events (SSE) to create and connect to the server.
建议下载本期的代码,因为里面有我为大家准备好的数据库文件。要不然,你们得自己 mock 数据了。
The code for this tutorial: [https://github.com/LSTM-Kirigaya/openmcp-tutorial/tree/main/neo4j-go-server](https://github.com/LSTM-Kirigaya/openmcp-tutorial/tree/main/neo4j-go-server)
It's recommended to download the code for this tutorial, as it includes a pre-prepared database file. Otherwise, you'll need to mock the data yourself.
---
## 1. 准备
## 1. Preparation
项目结构如下:
The project structure is as follows:
```bash
📦neo4j-go-server
┣ 📂util
┃ ┗ 📜util.go # 工具函数
┣ 📜main.go # 主函数
┗ 📜neo4j.json # 数据库连接的账号密码
```
📦neo4j-go-server
┣ 📂util
┃ ┗ 📜util.go # Utility functions
┣ 📜main.go # Main function
┗ 📜neo4j.json # Database connection credentials
```
我们先创建一个 go 项目:
First, create a Go project:
```bash
mkdir neo4j-go-server
cd neo4j-go-server
go mod init neo4j-go-server
```
mkdir neo4j-go-server
cd neo4j-go-server
go mod init neo4j-go-server
```
---
## 2. Database Initialization
## 2. 完成数据库初始化
### 2.1 Install Neo4j
### 2.1 安装 neo4j
First, set up a Neo4j database locally or on a server following [this tutorial](https://kirigaya.cn/blog/article?seq=199). You only need to complete the first two steps. Add the `bin` path to your environment variables and set the password to `openmcp`.
首先,根据我的教程在本地或者服务器配置一个 neo4j 数据库,这里是是教程,你只需要完成该教程的前两步即可: [neo4j 数据库安装与配置](https://kirigaya.cn/blog/article?seq=199)。将 bin 路径加入环境变量,并且设置的密码设置为 openmcp。
然后在 main.go 同级下创建 neo4j.json填写 neo4j 数据库的连接信息:
Next, create `neo4j.json` in the same directory as `main.go` and fill in the connection details:
```json
{
"url" : "neo4j://localhost:7687",
"name" : "neo4j",
"password" : "openmcp"
"url": "neo4j://localhost:7687",
"name": "neo4j",
"password": "openmcp"
}
```
```
### 2.2 导入事先准备好的数据
### 2.2 Import Pre-Prepared Data
安装完成后,大家可以导入我实现准备好的数据,这些数据是我的个人网站上部分数据脱敏后的摘要,大家可以随便使用,下载链接:[neo4j.db](https://github.com/LSTM-Kirigaya/openmcp-tutorial/releases/download/neo4j.db/neo4j.db)。下载完成后,运行下面的命令:
After installation, import the pre-prepared data. This data is an anonymized excerpt from my personal website and can be freely used. Download link: [neo4j.db](https://github.com/LSTM-Kirigaya/openmcp-tutorial/releases/download/neo4j.db/neo4j.db). After downloading, run the following commands:
```bash
neo4j stop
neo4j-admin load --database neo4j --from neo4j.db --force
neo4j start
```
neo4j stop
neo4j-admin load --database neo4j --from neo4j.db --force
neo4j start
```
然后,我们登录数据库就能看到我准备好的数据啦:
Then, log in to the database to see the imported data:
```bash
cypher-shell -a localhost -u neo4j -p openmcp
```
cypher-shell -a localhost -u neo4j -p openmcp
```
<div align=center>
<img src="https://pic1.zhimg.com/80/v2-4b53ad6a355c05d99c7ed18687ced717_1440w.png" style="width: 80%;"/>
</div>
<div align=center>
<img src="https://pic1.zhimg.com/80/v2-4b53ad6a355c05d99c7ed18687ced717_1440w.png" style="width: 80%;"/>
</div>
### 2.3 验证 go -> 数据库连通性
### 2.3 Verify Go-to-Database Connectivity
为了验证数据库的连通性和 go 的数据库驱动是否正常工作,我们需要先写一段数据库访问的最小系统。
To verify connectivity and the Go driver's functionality, we'll first implement a minimal database access system.
先安装 neo4j 的 v5 版本的 go 驱动:
Install the Neo4j Go driver (v5):
```bash
go get github.com/neo4j/neo4j-go-driver/v5
```
go get github.com/neo4j/neo4j-go-driver/v5
```
`util.go` 中添加以下代码:
Add the following code to `util.go`:
```go
package util

import (
	"context"
	"encoding/json"
	"fmt"
	"os"

	"github.com/neo4j/neo4j-go-driver/v5/neo4j"
)

var (
	Neo4jDriver neo4j.DriverWithContext
)

// CreateNeo4jDriver creates a connection to the Neo4j server,
// reading url/name/password from a JSON config file.
func CreateNeo4jDriver(configPath string) (neo4j.DriverWithContext, error) {
	jsonString, err := os.ReadFile(configPath)
	if err != nil {
		return nil, err
	}
	config := make(map[string]string)
	if err := json.Unmarshal(jsonString, &config); err != nil {
		return nil, err
	}

	Neo4jDriver, err = neo4j.NewDriverWithContext(
		config["url"],
		neo4j.BasicAuth(config["name"], config["password"], ""),
	)
	if err != nil {
		return Neo4jDriver, err
	}
	return Neo4jDriver, nil
}

// ExecuteReadOnlyCypherQuery runs a Cypher query in a read-only session
// and returns the records as a slice of maps.
func ExecuteReadOnlyCypherQuery(
	cypher string,
) ([]map[string]any, error) {
	session := Neo4jDriver.NewSession(context.TODO(), neo4j.SessionConfig{
		AccessMode: neo4j.AccessModeRead,
	})
	defer session.Close(context.TODO())

	result, err := session.Run(context.TODO(), cypher, nil)
	if err != nil {
		fmt.Println(err.Error())
		return nil, err
	}

	var records []map[string]any
	for result.Next(context.TODO()) {
		records = append(records, result.Record().AsMap())
	}

	return records, nil
}
```
Add the following code to `main.go`:
```go
package main

import (
	"fmt"
	"neo4j-go-server/util"
)

var (
	neo4jPath string = "./neo4j.json"
)

func main() {
	_, err := util.CreateNeo4jDriver(neo4jPath)
	if err != nil {
		fmt.Println(err)
		return
	}

	fmt.Println("Neo4j driver created successfully")
}
```
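For reference, `CreateNeo4jDriver` expects `neo4j.json` to hold the connection URL and credentials. A minimal sketch is shown below; the values are placeholders matching the `cypher-shell` login used earlier, so adjust them (in particular the bolt URL and port) to your own setup:

```json
{
    "url": "bolt://localhost:7687",
    "name": "neo4j",
    "password": "openmcp"
}
```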
Run the program to verify database connectivity:
```bash
go run main.go
```
If the output is `Neo4j driver created successfully`, the connectivity check has passed.
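If you want one more check before moving on, a small optional snippet (my own addition, not part of the original tutorial) can exercise `util.ExecuteReadOnlyCypherQuery` at the end of `main`:

```go
	// Optional smoke test: count all nodes through the read-only helper.
	records, err := util.ExecuteReadOnlyCypherQuery("MATCH (n) RETURN count(n) AS total")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(records) // e.g. [map[total:42]]
```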
---
## 3. Implement the MCP Server

The most popular MCP SDK for Go is `mark3labs/mcp-go`, so that's what we'll use.

> The demo for `mark3labs/mcp-go` is at [https://github.com/mark3labs/mcp-go](https://github.com/mark3labs/mcp-go). It's very simple, so we'll use it directly.
Install it first:

```bash
go get github.com/mark3labs/mcp-go
```
Then, add the following code to `main.go`:
```go
// ... existing code ...

var (
	addr string = "localhost:8083"
)

func main() {
	// ... existing code ...

	s := server.NewMCPServer(
		"Read-Only Neo4j Server",
		"0.0.1",
		server.WithToolCapabilities(true),
	)

	srv := server.NewSSEServer(s)

	// Declare the schema for the executeReadOnlyCypherQuery tool
	executeReadOnlyCypherQuery := mcp.NewTool("executeReadOnlyCypherQuery",
		mcp.WithDescription("Execute a read-only Cypher query"),
		mcp.WithString("cypher",
			mcp.Required(),
			mcp.Description("Cypher query statement (must be read-only)"),
		),
	)

	// Bind the actual function to the declared schema
	s.AddTool(executeReadOnlyCypherQuery, func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
		args, ok := request.Params.Arguments.(map[string]interface{})
		if !ok {
			return mcp.NewToolResultText(""), fmt.Errorf("invalid arguments type")
		}
		cypher, ok := args["cypher"].(string)
		if !ok {
			return mcp.NewToolResultText(""), fmt.Errorf("cypher argument is not a string")
		}
		result, err := util.ExecuteReadOnlyCypherQuery(cypher)
		fmt.Println(result)
		if err != nil {
			return mcp.NewToolResultText(""), err
		}
		return mcp.NewToolResultText(fmt.Sprintf("%v", result)), nil
	})

	// Serve at http://localhost:8083/sse
	fmt.Printf("Server started at http://%s/sse\n", addr)
	srv.Start(addr)
}
```
Run the server:
```bash
go run main.go
```
You should see the following output:
```
Neo4j driver created successfully
Server started at http://localhost:8083/sse
```
This means the MCP server is now running locally on port 8083.
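Before attaching a client, you can optionally probe the endpoint from another terminal. This only confirms that the HTTP server answers; the exact event payload depends on the SDK version:

```bash
# -N disables buffering so the event stream prints as it arrives; Ctrl+C to stop.
curl -N http://localhost:8083/sse
```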
---
## 4. Debugging with OpenMCP

### 4.1 Add the Workspace SSE Debugging Project

Next, we'll debug using OpenMCP. Click the OpenMCP icon on the left side of VSCode to enter the control panel. If you downloaded the project from [https://github.com/LSTM-Kirigaya/openmcp-tutorial/tree/main/neo4j-go-server](https://github.com/LSTM-Kirigaya/openmcp-tutorial/tree/main/neo4j-go-server), you'll see a pre-configured debugging project named "Read-Only Neo4j Server" under "MCP Connections (Workspace)". If you built this project from scratch, you can add the connection manually by selecting SSE and entering `http://localhost:8083/sse` (leave OAuth blank).
<div align=center>
<img src="https://picx.zhimg.com/80/v2-31a01f1253dfc8c42e23e05b1869a932_1440w.png" style="width: 80%;"/>
</div>
### 4.2 Test the Tool

The first step in debugging an MCP server is always testing the MCP tool. Create a new tab, select "Tool," click the tool shown below, and enter `CALL db.labels() YIELD label RETURN label`. This query lists all node types. If the output matches the result below, the whole chain works correctly.
<div align=center>
<img src="https://pic1.zhimg.com/80/v2-dd59d9c96ecb455e527ab8aa7f963908_1440w.png" style="width: 100%;"/>
</div>
### 4.3 Explore the LLM's Capabilities and Use Prompts to Encapsulate Knowledge

Now, let's do something fun! We'll test the LLM's capability boundaries, because Neo4j is a specialized database and general-purpose LLMs may not know how to use it. Create a new tab, click "Interactive Test," and ask a simple question:
```
Find the latest 10 comments for me.
```
The result is as follows:
<div align=center>
<img src="https://picx.zhimg.com/80/v2-44fab30650051db4e3b94de34275af3a_1440w.png" style="width: 100%;"/>
</div>
You can see that the LLM queried the wrong node type. In my example, the node representing comments is `BlogComment`, not `Comment`. This means the LLM doesn't have a general methodology for querying databases. This is its current capability boundary. We'll now inject our experience and knowledge step by step through the system prompt.
### 4.4 Teach the LLM to Find Database Nodes

Think about it: how do we, as engineers, know that the comment node is `BlogComment`? We usually list all the node types in the database and guess from the naming. For this database, I'd first run the following Cypher query:
```sql
CALL db.labels() YIELD label RETURN label
```
Its output is shown in the screenshot in section 4.2. If your English is decent, you can guess that `BlogComment` most likely represents blog comments. Now, let's inject this methodology into the system prompt to encapsulate this layer of knowledge. Click the button at the bottom of the image below to enter "System Prompts":
<div align=center>
<img src="https://pica.zhimg.com/80/v2-e0fdd265e53dd354163358be1f5cc3f6_1440w.png" style="width: 100%;"/>
</div>
Create a new prompt named "neo4j" and enter:
```
You are an agent skilled in Neo4j queries. For a user's query request, you may not know the corresponding database node type. In such cases, you should first list all node types and identify the one you think most likely matches the user's question. For example, if the user asks for "articles" matching certain criteria and you don't know the node type for articles, you need to list all nodes first.
```
Click "Save," then repeat the earlier question in "Interactive Test":
```
Find the latest 10 comments for me.
```
The LLM's response is now:
<div align=center>
<img src="https://picx.zhimg.com/80/v2-ccf4a5ecb5691620fca659dcd60d2e38_1440w.png" style="width: 80%;"/>
</div>
Much better, right? The LLM successfully found the `BlogComment` node and returned the corresponding data.
However, it's still not quite right. We asked for the "latest" 10 comments, but the LLM returned the "earliest" 10. Looking at the LLM's call details, we can see it used `ORDER BY comment.createdAt`. The problem is that in our database, the field recording when a comment was created is not `createdAt` but `createdTime`. This means the LLM doesn't know it doesn't know the node's fields, leading to a "hallucination" where it makes up a field.
LLMs won't explicitly admit ignorance. Research on OOD (Out-of-Distribution) by Jinhui during his graduate studies explains the fundamental reason: [EDL (Evidential Deep Learning) Principles and Code Implementation](https://kirigaya.cn/blog/article?seq=154). If your curiosity matches your mathematical skills, give this article a try. In short, because LLMs hallucinate about things they don't know, we have room to inject our experience.
### 4.5 Teach the LLM to Find Database Node Fields
From the above attempt, we know we're close to the finish line. We just need to tell the LLM that in our database, the field recording when a comment was created is `createdTime`, not `createdAt`.
To teach the LLM about fields, let's refine the system prompt:
```
You are an agent skilled in Neo4j queries. For a user's query request, you may not know the corresponding database node type. In such cases, you should first list all node types and identify the one you think most likely matches the user's question. For example, if the user asks for "articles" matching certain criteria and you don't know the node type for articles, you need to list all nodes first.

For specific queries, you should first fetch one or two example records to see what fields the node type has. For example, if the user asks for the "latest articles," you don't know which field represents the "creation time," so you need to list one or two article nodes to see the available fields before building the query for the latest 10 articles.
```
The result is now:
<div align=center>
<img src="https://picx.zhimg.com/80/v2-e7a2faf43249fe108288604a2eb948ad_1440w.png" style="width: 80%;"/>
</div>
Perfect, right?

By using OpenMCP for debugging, we can uniquely determine an agent's behavior through the combination of a system prompt and an MCP server.
---
## 5. Expand the MCP Server's Atomic Skills

In the example above, we injected our experience and knowledge through the system prompt. But notice that the behaviors we injected, such as "query all node types" and "get all fields of a node", follow very fixed procedures. System prompts, however, are written in natural language, which is inherently ambiguous, so we can't guarantee they will always generalize. Besides the system prompt, is there another way to inject our experience and knowledge? There is.

For procedures this fixed, and obvious enough that any moderately experienced person would think of them, there is a more standardized way to inject knowledge than the system prompt: implement these procedures as additional MCP tools. I call this method "Atomization Supplement".

Atomization Supplement means adding extra MCP tools that are "atomic" at the functional level.

> A tool is called an "Atomic Tool" if it meets one of the following conditions:
> - The tool cannot be obtained by a finite combination of finer-grained functionalities.
> - The finer-grained functionalities that would compose the tool are not fully or reliably used by the LLM (for example, assembly language or DOM queries).

Adding extra atomic tools lets the LLM know, "Ah! I have other tricks up my sleeve!" As long as their descriptions are appropriate, the LLM can use them to gather additional information instead of hallucinating and failing the task.

For the workflow above, we have identified two skills where the LLM hallucinates:

1. Getting the label of a node type (asked about "comments," the LLM didn't admit it doesn't know the comment label and directly used `Comment`, while the actual label is `BlogComment`).
2. Getting the fields of a node type (asked for the "latest comments," the LLM sorted by `createdAt`, while the field recording creation time is `createdTime`).

Earlier, we injected this knowledge through the system prompt. Now, discard your system prompt! Let's play a more interesting game. In `util.go`, we implement two additional functions targeting the two hallucinations above (tested: Cursor or Trae can generate the code below perfectly, so you don't need to write it yourself):
```go
// GetAllNodeTypes returns all node labels in the database.
func GetAllNodeTypes() ([]string, error) {
	cypher := "MATCH (n) RETURN DISTINCT labels(n) AS labels"
	result, err := ExecuteReadOnlyCypherQuery(cypher)
	if err != nil {
		return nil, err
	}
	var nodeTypes []string
	for _, record := range result {
		labels := record["labels"].([]any)
		for _, label := range labels {
			nodeTypes = append(nodeTypes, label.(string))
		}
	}
	return nodeTypes, nil
}

// GetNodeFields returns the field names of one example node of the given type.
func GetNodeFields(nodeType string) ([]string, error) {
	cypher := fmt.Sprintf("MATCH (n:%s) RETURN keys(n) AS keys LIMIT 1", nodeType)
	result, err := ExecuteReadOnlyCypherQuery(cypher)
	if err != nil {
		return nil, err
	}
	var fields []string
	for _, record := range result {
		keys := record["keys"].([]any)
		for _, key := range keys {
			fields = append(fields, key.(string))
		}
	}
	return fields, nil
}
```
In `main.go`, declare their schemas and register the tools:
```go
// ... existing code ...

	getAllNodeTypes := mcp.NewTool("getAllNodeTypes",
		mcp.WithDescription("Get all node types"),
	)

	getNodeField := mcp.NewTool("getNodeField",
		mcp.WithDescription("Get the fields of a node"),
		mcp.WithString("nodeLabel",
			mcp.Required(),
			mcp.Description("The label of the node"),
		),
	)

	// Register the tools against their declared schemas
	s.AddTool(getAllNodeTypes, func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
		result, err := util.GetAllNodeTypes()
		fmt.Println(result)
		if err != nil {
			return mcp.NewToolResultText(""), err
		}
		return mcp.NewToolResultText(fmt.Sprintf("%v", result)), nil
	})

	s.AddTool(getNodeField, func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
		args, ok := request.Params.Arguments.(map[string]interface{})
		if !ok {
			return mcp.NewToolResultText(""), fmt.Errorf("invalid arguments type")
		}
		nodeLabel, ok := args["nodeLabel"].(string)
		if !ok {
			return mcp.NewToolResultText(""), fmt.Errorf("nodeLabel argument is not a string")
		}
		result, err := util.GetNodeFields(nodeLabel)
		fmt.Println(result)
		if err != nil {
			return mcp.NewToolResultText(""), err
		}
		return mcp.NewToolResultText(fmt.Sprintf("%v", result)), nil
	})

// ... existing code ...
```

Restart the SSE server and ask the LLM directly. This time, disable the system prompt (create an empty one, or simply delete the current prompt). The result is as follows:

<div align=center>
<img src="https://picx.zhimg.com/80/v2-1e88f7d8e04b949040a02673c13d6462_1440w.png" style="width: 80%;"/>
</div>

As you can see, without the system prompt, the LLM successfully executed the whole process. Perfect.
---
## Summary
This tutorial walked you through implementing an MCP server for a read-only Neo4j database using Go and SSE. With this MCP, you can easily query the database using natural language without manually writing Cypher queries.
For cases where the LLM fails due to hallucinations, we methodically refined the system prompt to encapsulate knowledge. Finally, through the paradigm of Atomization Supplement, we packaged this knowledge into a more robust MCP server. Now, anyone can use your MCP server to query the Neo4j database with natural language.
Finally, if you find OpenMCP useful, dont forget to star our project: [https://github.com/LSTM-Kirigaya/openmcp-client](https://github.com/LSTM-Kirigaya/openmcp-client)
To further discuss OpenMCP, join our communication group (link in the GitHub repo).
---
next:
  text: Python Weather MCP Server (STDIO)
  link: '/plugin-tutorial/examples/python-simple-stdio'
---
# MCP Server Development Examples
## Python
- [Weather Information MCP Server (STDIO)](./python-simple-stdio)
  Python implementation for weather data queries via STDIO
- [Universal Form Filling MCP (STDIO)](./python-form-stdio)
  Automated form population solution using Python
- [Blender Integration MCP (STDIO)](./python-blender-stdio)
  3D modeling automation through Blender commands
- [Cadence EDA MCP (STDIO)](./python-cadence-stdio)
  Electronic design automation interface
- Video Editing via FFmpeg MCP
  AI-driven video editing workflows
- Knowledge Base Injection with RAG MCP
  Dynamic document retrieval system
- Stable Diffusion MCP Server
  Text-to-image generation service
## Node.js
- [Crawl4AI Web Crawler MCP (STDIO)](./typescript-crawl4ai-stdio)
  TypeScript implementation for advanced web scraping
## Go
- [Neo4j Read-Only MCP (SSE)](./go-neo4j-sse)
  Graph database query interface using Server-Sent Events
## Java
- [Document Database MCP (HTTP)](./java-es-http)
  Elasticsearch integration with HTTP transport
## Authentication

- [OAuth2 MCP Server (SSE)](./sse-oauth2)
  Secure authentication implementation example
# Implementing a Weather Information MCP Server in Python
[Video Tutorial](https://www.bilibili.com/video/BV1zYGozgEHc)
## The Problem Scenario

Before we begin, consider this common situation: I'm planning to attend the Arknights "Rusty Shadows" convention next Saturday in Hangzhou and want to check the weather forecast. When I ask an LLM about Saturday's weather, I get this response:
<div align=center>
<img src="https://picx.zhimg.com/80/v2-4c623ac6897e12093535b0d9ed9cf242_1440w.png" style="width: 100%;"/>
</div>
This "teach you to fish" approach isn't helpful for simple everyday queries. While there are many weather apps available, how can we integrate weather data directly into LLMs to get actionable answers?
## Introduction
👉 [Previous Guide](https://zhuanlan.zhihu.com/p/32593727614)
In our last tutorial, we covered MCP fundamentals. Now, we'll develop our first MCP server to bridge existing applications/services with LLMs.
For efficient development, we'll use OpenMCP - an integrated MCP testing tool I recently open-sourced:
[OpenMCP Announcement](https://zhuanlan.zhihu.com/p/1894785817186121106)
OpenMCP GitHub: https://github.com/LSTM-Kirigaya/openmcp-client
(Stars appreciated! :D)
### Initial Setup
First, install the UV tool (a Conda alternative):
```bash
pip install uv
uv # Verify installation
```
Create a new project:
```bash
mkdir simple-mcp && cd simple-mcp
uv init
uv add mcp "mcp[cli]"
```
Install the OpenMCP plugin in VSCode:
<div align=center>
<img src="https://picx.zhimg.com/80/v2-525c4576398078547fdd6eeef26532aa_1440w.png" style="width: 100%;"/>
</div>
### Basic MCP Server
Create `simple_mcp.py`:
```python
from mcp.server.fastmcp import FastMCP

mcp = FastMCP('Weather MCP Server', version="1.0.0")

@mcp.tool(
    name='weather',
    description='Get weather information for specified city'
)
def get_weather(city: str) -> str:
    """Weather query protocol - returns formatted string"""
    return f"Weather in {city}: Sunny, 25°C"

# Additional example tools/resources omitted for brevity
```
Test the server:
```bash
uv run mcp run simple_mcp.py
```
### Connecting with OpenMCP
Click the purple OpenMCP icon in VSCode to launch the debugger. Verify connection status (green indicator):
<div align=center>
<img src="https://picx.zhimg.com/80/v2-c4ebbbfe98d51e8b6e7de6c6d1bceb2e_1440w.png" style="width: 100%;"/>
</div>
## Developing the Weather Function
### Tool Debugging
Our initial weather tool just returns static data. Let's test it in OpenMCP:
<div align=center>
<img src="https://picx.zhimg.com/80/v2-d32a9c0d9fcab497dc03152a72c4c62b_1440w.png" style="width: 100%;"/>
</div>
### Interactive Testing
Configure your LLM API in OpenMCP:
<div align=center>
<img src="https://pic1.zhimg.com/80/v2-367780b204d2aa50354585272b71af20_1440w.png" style="width: 100%;"/>
</div>
Test without tools:
```
What's the temperature in Hangzhou?
```
<div align=center>
<img src="https://pic1.zhimg.com/80/v2-d3aa56602f574a6968295f9a5c93438f_1440w.png" style="width: 100%;"/>
</div>
Then with our weather tool enabled:
<div align=center>
<img src="https://picx.zhimg.com/80/v2-e934d386e20b1de43fb5e0dd426de86e_1440w.png" style="width: 100%;"/>
</div>
Notice the multi-step process (the underlying tool-call payload is shown after this list):
1. LLM calls our weather tool with `{"city": "Hangzhou"}`
2. Our server responds with formatted weather data
3. LLM generates final answer
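For reference, the tool call issued in step 1 carries a JSON arguments object like this (taken from the original example; the city name will vary with your question):

```json
{
    "city": "Hangzhou"
}
```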
## Production-Ready Implementation
Here's a complete weather implementation using a real API:
```python
import requests
import json
from typing import NamedTuple, Optional
from mcp.server.fastmcp import FastMCP

class CityWeather(NamedTuple):
    city_name_en: str
    city_name_cn: str
    city_code: str
    temp: str
    wd: str
    # ... (remaining fields omitted) ...
    aqi: str
    weather: str

def get_city_weather(city_code: str) -> Optional[CityWeather]:
    """Get weather by city code"""
    try:
        url = f"http://d1.weather.com.cn/sk_2d/{city_code}.html"
        headers = {
            "User-Agent": "Mozilla/5.0...",
            "Host": "d1.weather.com.cn",
            "Referer": "http://www.weather.com.cn/"
        }
        response = requests.get(url, headers=headers)
        response.raise_for_status()

        # Fix the encoding before parsing the JSON payload
        content = response.text.encode('latin1').decode('unicode_escape')
        json_str = content[content.find("{"):]
        weather_data = json.loads(json_str)

        return CityWeather(
            city_name_en=weather_data.get("nameen", ""),
            city_name_cn=weather_data.get("cityname", "").encode('latin1').decode('utf-8'),
            # ... (remaining fields omitted) ...
            aqi=weather_data.get("aqi", ""),
            weather=weather_data.get("weather", "").encode('latin1').decode('utf-8')
        )
    except Exception as e:
        print(f"Weather query failed: {str(e)}")
        return None

mcp = FastMCP('Weather MCP', version="1.0.0")

@mcp.tool(
    name='get_weather_by_city_code',
    description='Get weather by city code (integer)'
)
def get_weather_by_code(city_code: int) -> str:
    weather_data = get_city_weather(str(city_code))
    return str(weather_data)
```
Key Notes:
1. Use `int` type for numeric IDs to ensure proper JSON serialization
2. Follow Python naming conventions for tool names
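To make note 1 concrete, here is a small illustration (with a made-up city code) of why long numeric IDs should not be typed as `str`: bare digits in a JSON payload deserialize back into an `int`, which then fails the type check against a `str` parameter.

```python
import json

# A quoted value survives the round trip as a string...
print(json.loads('{"city_code": "101210101"}'))  # {'city_code': '101210101'}

# ...but bare digits come back as an int, not a str.
print(json.loads('{"city_code": 101210101}'))    # {'city_code': 101210101}
```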
### Final Test
<div align=center>
<img src="https://picx.zhimg.com/80/v2-e581c6461190b358adda50ce83633520_1440w.png" style="width: 100%;"/>
</div>
Success! We've built a fully functional weather MCP service. For production deployments, consider using SSE connections for better scalability (covered in future tutorials).
OpenMCP GitHub: https://github.com/LSTM-Kirigaya/openmcp-client
---
layout: doc
---
# FAQ

## Error Code Explanation

### 32000 - MCP Connection Failed

MCP connection failure can have multiple causes. Here are some common situations:

• **Virtual Environment Path Mismatch**

A mismatch between the virtual environment (venv) and the entry file path is one of the common reasons for connection failure.

For detailed solutions, please refer to: [Configuration Guide](./venv-not-same-path/venv-not-same-path.md)
---
• **Other Possible Causes**
* Port is occupied
* Environment variables are misconfigured
* Dependencies are not properly installed
> If you encounter the above issues, please first check the error logs for more details. If the problem persists, you can seek help on [GitHub Issues](https://github.com/LSTM-Kirigaya/openmcp-client/issues).
# Configuration When Virtual Environment and Entry File Are in Different Directories

## Problem Description

When using OpenMCP, you may sometimes encounter situations where the virtual environment (venv) and the Python file are not located in the same directory, or the virtual environment might even be outside the project folder. In such cases, clicking the connect button on the top right may result in an MCP connection failure (Error Code: 32000).
## Solution

### 1. Adjust the Execution Directory

In the connection options, you need to set the execution directory to where the virtual environment is located:

![MCP Connection Options Interface](./image-2.png)
### 2. Modify the Execution Command

At the same time, adjust the execution command accordingly:

![Example of Modified Execution Command](./image.png)
### 3. Directly Specify the Interpreter Path

For certain cases, you can directly specify the full path of the Python interpreter in the command, for example:
```bash
C:\code\ygo-chat\.venv\Scripts\python.exe example.py
```
> Note: This method also applies to the command field for node or mcp instructions, as well as to the MCP configuration files of other MCP clients.
---
next:
  text: What is MCP?
  link: '/plugin-tutorial/what-is-mcp'
---
# OpenMCP Overview
:::warning
Before starting with OpenMCP, we strongly recommend understanding the basic concepts of MCP first: [Agent Era Infrastructure | MCP Protocol Introduction](https://kirigaya.cn/blog/article?seq=299)
:::
## What is OpenMCP

OpenMCP is a developer-oriented MCP debugger and SDK, dedicated to reducing the full-chain development costs of AI Agents and developers' cognitive load.
![](./images/openmcp.png)
OpenMCP consists of two parts; this section covers the use of the OpenMCP debugger, which we call the OpenMCP Client. The OpenMCP Client itself is a plugin that runs on vscode-like editors. It is compatible with all current MCP protocol features and provides rich, developer-friendly functionality, serving as an enhanced replacement for Claude Inspector.
:::info Vscode-like Editors (VLE)
Vscode-like editors (VLE) are general-purpose code editors developed based on the Vscodium core. They can mostly support the vscode plugin ecosystem and have functionality similar to vscode (such as supporting the LSP 3.7 protocol, remote SSH for development, and cross-editor configuration files).

Typical VLEs include: vscode, trae, cursor, and various Vscodium distributions.
:::
## What is Claude Inspector

Claude Inspector is an open-source MCP debugger released officially by the Claude team (the proposers of the MCP protocol). After developing an MCP server, developers can use this debugger to test its functional completeness.
![](./images/inspector.png)
However, Inspector has several drawbacks:
- Cumbersome to use: Requires starting a web application via `mcp dev` each time
- Limited features: Only provides basic debugging of MCP tool properties. Testing MCP server interactions with large models requires connecting to Claude Desktop and restarting the client, which is inconvenient for continuous debugging scenarios.
- Contains bugs: Known issues with SSE and streamable HTTP remote connections significantly impact real-world industrial development.
- No debugging content saving/tracing: Critical for large-scale microservice MCP projects.
- Cannot debug multiple MCP servers simultaneously: Essential for MCP atomic horizontal scaling scenarios.
The OpenMCP Client was developed to address these Inspector pain points, lowering MCP server development barriers and allowing users to focus more on business logic.
---
layout: doc
---
# Getting OpenMCP
## Installing OpenMCP from the Plugin Marketplace
You can directly obtain the OpenMCP plugin from the plugin marketplace of mainstream VLEs. For example, in vscode, click on the plugin marketplace on the left side, then search for `OpenMCP` in the search bar to find the OpenMCP plugin.

![vscode Plugin Marketplace](./images/vscode-plugin-market.png)

## Offline Installation

VLE plugins are essentially zip files with the `.vsix` extension, which are cross-platform. Our CI/CD bot automatically builds and uploads the `.vsix` file to GitHub Releases after every version release. You can access the corresponding GitHub release page for each version through the following link:
```
https://github.com/LSTM-Kirigaya/openmcp-client/releases/tag/v{version_number}
```
For example, for version 0.1.1, its release page link is: [https://github.com/LSTM-Kirigaya/openmcp-client/releases/tag/v0.1.1](https://github.com/LSTM-Kirigaya/openmcp-client/releases/tag/v0.1.1)

Under the `Assets` section, you can find the corresponding `.vsix` file.

![GitHub Release](./images/github-release.png)

In addition, you can also get the latest OpenMCP `.vsix` from the following marketplace web pages:

* [https://open-vsx.org/extension/kirigaya/openmcp](https://open-vsx.org/extension/kirigaya/openmcp)
* [https://marketplace.visualstudio.com/items?itemName=kirigaya.openmcp](https://marketplace.visualstudio.com/items?itemName=kirigaya.openmcp)

Click on the `.vsix` file to download it. After the download is complete, you can install it directly. There are two ways to install an external `.vsix` file in a VLE.
### Method 1: Install in VLE

On the VLE plugin marketplace page, there is a button with three dots. After clicking it, you will see the button highlighted in red in the list below.

![vscode Plugin Marketplace](./images/vscode-plugin-market-install-from.png)

Click on it, find the `.vsix` file you just downloaded, and click to complete the installation.

### Method 2: Install via Command Line

If your VLE is installed globally, a command-line tool will be available automatically. The command is as follows:
::: code-group
```bash [vscode]
code --install-extension /path/to/openmcp-0.1.1.vsix
```
```bash [trae]
trae --install-extension /path/to/openmcp-0.1.1.vsix
```
```bash [cursor]
cursor --install-extension /path/to/openmcp-0.1.1.vsix
```
:::
`/path/to/openmcp-0.1.1.vsix` is the absolute path of the `.vsix` file you just downloaded. This is another way to install the plugin.
# Your First MCP

There are many programming languages that can implement MCP. Almost every major language has official or community support, and searching for "<language> + MCP" will turn up the corresponding library. In the [[mcp-examples|MCP Server Development Examples]] section, we also provide examples in different programming languages.

Among all programming languages, Python is undoubtedly the easiest and most beginner-friendly for developing MCP, so we'll implement our first MCP in Python. Other languages work much the same way.
## Installing `uv`

When writing an MCP server in Python, it's highly recommended to use `uv` as the package manager. All you need to know about `uv` is that it's a high-performance package manager combining the best features of pip and conda. If you don't have it yet, install `uv` via pip:
```bash
pip install uv
```
:::warning Attention Anaconda or Miniconda users!
Please do not install `uv` in any environment other than base. Install it in the base environment: `uv` handles environment isolation itself, so don't worry about it contaminating base. If you install it outside base or with the global pip, we simply won't know where your `uv` ended up. When installed via pip in the base environment, the script lands in `~/anaconda/bin/uv`, so please make sure `~/anaconda/bin/` is on your `$PATH`.
:::
Check the version of `uv`:
```bash
uv version
```
My output is:
```
uv 0.6.9 (3d9460278 2025-03-20)
```
When following along, make sure your version is not lower than mine.
## Creating the Simplest MCP Server

Let's enter the project directory and create the simplest possible MCP server.
```bash
mkdir -p ~/codes/my-first-mcp
cd ~/codes/my-first-mcp
uv init --no-workspace
```
At this point, your project should contain these three files:
```
README.md main.py pyproject.toml
```
Then, open the current folder in vscode or trae. We'll create the simplest MCP server, which will:

* Provide a tool named `add` for adding two numbers.
* Provide a resource named `greeting` to return a greeting message.

First, install the necessary MCP libraries:
```bash
uv add mcp "mcp[cli]"
```
Edit the content of `main.py` as follows:
```python
from mcp.server.fastmcp import FastMCP

mcp = FastMCP("JinHui's MCP Server", version="11.45.14")

@mcp.tool(
    name='add',
    description='Addition of two numbers in the real domain'
)
def add(a: int, b: int) -> int:
    return a + b

@mcp.resource(
    uri="greeting://{name}",
    name='greeting',
    description='A resource protocol for demonstration purposes'
)
def get_greeting(name: str) -> str:
    return f"Hello, {name}!"

@mcp.prompt(
    name='translate',
    description='A prompt for translation'
)
def translate(message: str) -> str:
    return f'Please translate the following sentence into Chinese:\n\n{message}'
```
## One-Click Connection Using OpenMCP

As shown above, we declared three functions serving as an MCP tool, resource, and prompt. Starting them in OpenMCP is very easy: simply click the OpenMCP icon at the top right to connect:
![](./images/connect-simple.png)
The first time you use OpenMCP, a guide will appear. We hope you take a moment to read it carefully.
![](./images/guide.png)
Once you log in and see the "Connection Successful" message as shown in the image, the MCP server has been successfully started and connected.
![](./images/connect-success.png)
Congratulations, the hardest part is over — you've successfully made the MCP connection!
For more information about connecting to an MCP server using OpenMCP, you can refer to the chapter [[connect-mcp|Connecting to an MCP Server]] in the manual.
## Appendix: What You Need to Know About Starting MCP with `uv`
OpenMCP has already done a lot of work for you, but there is actually more than one way to start an MCP server with `uv`. Understanding the underlying principles will help you adapt to various situations. While OpenMCP runs `uv run mcp run main.py` by default to start the MCP server for Python projects, some GitHub projects cannot be started this way.
Let's first learn how to start the MCP server from the command line using the Python code in the example above!
### Method 1: Using `mcp-cli`
MCP itself provides a scaffolding tool that can directly start the declared Python code as an MCP server. Use the following command to run it:
```bash
uv run mcp run main.py
```
### Method 2: Explicitly Start in the Code
You can also explicitly start the MCP server within the code by adding the following to the end of `main.py`:
```python
if __name__ == '__main__':
    mcp.run()
```
Then run the following command to start the MCP server:
```bash
uv run main.py
```
:::warning
Please don't run `python main.py`: `uv run` uses the libraries in the current virtual environment, which are invisible to the external Python. Also, don't run `uv run main.py` without starting the server via `mcp.run()`; the code above only declares the functions and doesn't actually execute anything.
:::
# Quick Start
1. [[acquire-openmcp|Get OpenMCP]]
2. [[first-mcp|Your First MCP]]
3. [[quick-debug|Quick Debugging of MCP]]
4. [[put-into-llm|Throw it into a Large Model to Test Its Effectiveness!]]
<br>
<br>
# Throw It into a Large Model to Test Its Effectiveness!
In the [[quick-debug|previous chapter]], we successfully connected to the MCP server and debugged its various features—giving us a good overview of OpenMCPs basic debugging capabilities. Now, its time to place the MCP into a large language model (LLM) environment to test it. After all, the primary goal of MCP is to make it easy and low-cost to integrate your own functionalities into an LLM.
Before we begin the actual interaction, please follow the instructions in [[connect-llm|Connecting to a Large Model]] to configure your LLM API and ensure your LLM service is working correctly.
## Talking to a Large Model
Lets start by creating a new debug project and choosing **“Interactive Test”**, which opens a chat window for interacting with the LLM. Heres a basic overview of the chat window provided by OpenMCP:
![](./images/llm-intro.png)
Several important buttons are marked in the image above. When using it for the first time, you can proceed with the default settings. By clicking "**Tools in Use**," you'll see the currently active tools. By default, OpenMCP activates all tools provided by the connected MCP server. If you'd like to disable any tools, you can selectively do so via the same button:
![](./images/llm-tools.png)
Alright, let's test how the large model calls our tools via the MCP protocol. Keep the default settings, and ask this question: <mark>Can you help me calculate 123 + 1313?</mark>
Press enter and wait for the result. You should see something like this:
![](./images/llm-calc.png)
As you can see, the LLM chose to use the `add` tool we provided to perform the addition. OpenMCP also shows exactly how the LLM invoked the tool and the result returned by the tool. While this example is simple, for more complex queries, the LLM may call multiple tools in a single round to complete a task. If you want the model to only use one tool per response, you can disable the default setting **"Allow multiple tools per turn"** by clicking the toggle button below.
## System Prompts
For special cases—such as [bing-images](/Users/bytedance/projects/openmcp-tutorial/bing-images), an MCP server that returns Bing images based on keywords—you may need to guide the model on how to format its output.
Try asking: <mark>Can you help me search for some Arknights images?</mark>
By default, you might get a response like this:
![](./images/bing-image-common.png)
Here, the LLM returns image links. However, what we really want is for the images to be displayed directly on the screen. To instruct the LLM to return Markdown-style image outputs, we can use a **system prompt**.
Click on the “**System Prompt**” button below:
![](./images/system-prompt-add.png)
Add a new system prompt with the title **"bing image"** and the content:
```
You are an AI skilled at finding Bing images. When you find images, you should return them in Markdown image format, e.g., ![](https://xxxx.xx/xxxx)
```
Click save:
![](./images/system-prompt-image.png)
Next, move your cursor to the first user message box. Several buttons will appear; click the **Re-run** button to re-execute that conversation turn:
![](./images/rerun-bing-image.png)
Now you should see the images rendered correctly:
![](./images/llm-bing-image-render.png)
For more tips on using system prompts or other advanced techniques to control the behavior of agents, see [[go-neo4j-sse|Building a Read-only Neo4j MCP Server with Go (SSE)]].
## Conclusion
Great job! You've completed the basic OpenMCP tutorial. Now it's time to build something fun and meaningful. Check out the [[mcp-examples|MCP Server Development Examples]] for more use cases and inspiration.
The world is full of surprises; take your pick!

# Quick Debugging of MCP
In [[first-mcp|Your First MCP]], we successfully created a minimal MCP server instance and connected to it using OpenMCP.
Now, it's time to debug the server's functions. After all, not everyone is Jeff Dean; we don't always get everything right on the first try. MCP servers are rarely production-ready the moment they're written; there are often hidden issues. Imagine debugging a full-chain setup where your MCP is connected to a large model: if something goes wrong, there are many possible failure points. Is it the MCP server? The LLM vendor? OpenMCP itself? Categorizing potential errors and inspecting them one by one is the right way to go; this is **engineering intuition**.
## Getting to Know the Panel
When you first open OpenMCP, you'll land on a dashboard with four buttons, each representing a different type of debugging project:
![](./images/openmcp-home.png)
Right now, we need to verify that the **tool**, **resource**, and **prompt** functions are working properly. Since **tool** is the most frequently used in real projects, we'll start by debugging it.
## Debugging Tools
To debug a tool, click the “Tool” button on the dashboard to enter the tool debugging interface. Here's a basic overview of the tool panel:
![](./images/tool-desc.png)
To debug, first select a tool from the “Tool List” (click the right-side button to expand or refresh the list if needed). Then, on the right side under “Parameter Input & Execution,” enter test parameters and click **Run** to see the result:
![](./images/tool-result.png)
For example, testing `2 + 2` yields the result `4`, confirming that our MCP is connected properly and functioning correctly. You can use simple tests like this to verify your server's availability; this becomes especially important when debugging complex agent systems, and can even be scripted as part of an automated self-check.
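If you later want to script such a smoke test instead of clicking through the UI, a sketch along these lines should work with the official `mcp` Python SDK; the launch command and the `add` tool follow the earlier example and may differ in your project:

```python
import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

async def self_check() -> None:
    # Launch the server over STDIO, the same way OpenMCP does.
    params = StdioServerParameters(command='uv', args=['run', 'mcp', 'run', 'main.py'])
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            result = await session.call_tool('add', {'a': 2, 'b': 2})
            print(result.content)  # expect a text item containing "4"

asyncio.run(self_check())
```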
## Adding a Test Project
After testing one feature, you can add more test cases by clicking the **+** icon at the top:
![](./images/tool-add-test-project.png)
Here, select **Resource** to start debugging resources. Resources work a bit differently from the other two items; the MCP protocol defines two types of resource access:
* `resources/templates/list`: **Template resources**, which accept parameters (e.g., a file path) and return resource content accordingly, like a file-system MCP.
* `resources/list`: **Basic resources**, which take no parameters and simply return static or live data, like a browser MCP that returns the current console stdout.
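To make the two types concrete, here is a sketch of how each might be declared with `fastmcp`; the URI schemes and function bodies are illustrative, not taken from a real server:

```python
from mcp.server.fastmcp import FastMCP

mcp = FastMCP('resource-demo')

# Template resource: `{path}` becomes a parameter,
# so this shows up under resources/templates/list.
@mcp.resource('file://{path}')
def read_file(path: str) -> str:
    with open(path, encoding='utf-8') as f:
        return f.read()

# Plain resource: no parameters, listed under resources/list.
@mcp.resource('console://logs')
def console_logs() -> str:
    return 'stdout captured so far...'
```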
![](./images/resource-desc.png)
Using `resources/templates/list` is similar to debugging tools—just enter the parameter and click **Run** to view the output:
![](./images/resource-result.png)
For `resources/list`, since no parameters are required, just click on the resource name in the list to view its data.
## Summary
In this chapter, we covered how to use OpenMCP to debug an MCP server, including how to test **tools** and **resources**. The process for debugging **prompts** is similar, so feel free to try it on your own.
In the next chapter, we'll move on to the most exciting part: testing your MCP server with a large model, so you can find out whether what you've built is truly fun and valuable.

# Connecting to LLMs (Large Language Models)
To use the **"Interactive Testing"** feature for evaluating your MCP tools in conversations with an LLM, you'll first need to configure an LLM in OpenMCP.
:::warning Compatibility Notice
Currently, OpenMCP only supports LLM services that are **OpenAI API-compatible**. To use other types of LLMs, you can use a proxy like [newApi](https://github.com/QuantumNous/new-api) or implement your own adapter.
We support most mainstream LLMs out of the box. If you encounter issues connecting to an LLM, feel free to [[channel|contact us]].
:::
To configure an LLM, go to **Settings → API**:
![](./images/setting-api.png)
## Supported Models (by default)
OpenMCP comes pre-configured with commonly used LLM services. Here is a list of supported models:
| LLM Name | Provider | baseUrl | Default Model |
| --------------------- | ------------- | ---------------------------------------------------------- | -------------------- |
| DeepSeek | DeepSeek | `https://api.deepseek.com/v1` | `deepseek-chat` |
| OpenAI | OpenAI | `https://api.openai.com/v1` | `gpt-4-turbo` |
| Qwen (Tongyi Qianwen) | Alibaba | `https://dashscope.aliyuncs.com/compatible-mode/v1` | `qwen-plus` |
| Doubao Seed | ByteDance | `https://ark.cn-beijing.volces.com/api/v3` | `doubao-1.5-pro-32k` |
| Gemini | Google | `https://generativelanguage.googleapis.com/v1beta/openai/` | `gemini-2.0-flash` |
| Grok | xAI | `https://api.x.ai/v1` | `grok-3-mini` |
| Mistral | Mistral AI | `https://api.mistral.ai/v1` | `mistral-tiny` |
| Groq | Groq | `https://api.groq.com/openai/v1` | `mixtral-8x7b-32768` |
| Perplexity | Perplexity AI | `https://api.perplexity.ai/v1` | `pplx-7b-online` |
| Kimi Chat | Moonshot AI | `https://api.moonshot.cn/v1` | `moonshot-v1-8k` |
## Configuring the LLM
All you need to do is enter the corresponding `apiToken` for your provider, then click **Test**. If you see a response like the one below, the connection succeeded and you can start using the model in Interactive Testing:
![](./images/setting-api-test.png)
:::warning
Some users run into access problems. Make sure your baseUrl is filled in correctly. If you are using overseas services such as Gemini or OpenAI from within mainland China, make sure your network environment can actually reach them; a proxy server can be configured under Settings → General.
:::
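If you want to sanity-check a provider outside OpenMCP first, the same OpenAI-compatible credentials can be exercised directly with the `openai` Python SDK; the baseUrl and model below come from the table above, and the key is a placeholder:

```python
from openai import OpenAI

client = OpenAI(base_url='https://api.deepseek.com/v1', api_key='sk-...')

resp = client.chat.completions.create(
    model='deepseek-chat',
    messages=[{'role': 'user', 'content': 'ping'}],
)
print(resp.choices[0].message.content)
```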
## Adding More Models
If the model you want to use is not in the default list, you can add it in two ways:
### Method 1: Update Model List Automatically
Using Qwen as an example, after entering a valid `apiToken`, click **Update Model List**. If the provider strictly follows the OpenAI API standard, all available models will be fetched automatically.
![](./images/setting-update-models.png)
### Method 2: Manually Add Model
If your provider doesn't follow the OpenAI standard, you won't be able to use Method 1. Instead, you can add models manually. For example, to add a Grok model:
1. Locate **Grok** under providers and click **Edit**:
![](./images/setting-api-edit.png)
2. Click on the **Models** section, type the model name, press Enter, then click **Confirm**:
![](./images/setting-api-edit-1.png)
3. Return to the API page and click **Save**.
## Adding Custom Services
If the provider you want to use is not listed (e.g., a self-hosted model or a new cloud service), you can add it using the **Add Service** button. The process is similar to **Manual Model Addition** and wont be repeated here.

# Connecting to an MCP Server
Unlike Claude Desktop or other MCP client products, OpenMCP offers a **much smoother process** for connecting to MCP servers.
:::info What is an MCP Client?
An **MCP client** is a local application that communicates using the MCP protocol, typically in the form of a chatbot interface (similar to chat.deepseek.com or chat.openai.com). It's usually run as a local application because web pages lack direct file I/O permissions.
:::
To get started, open your VLE (Visual Language Environment). After installing OpenMCP following the steps in [[acquire-openmcp|Getting OpenMCP]], let's use Python to create a simple MCP server and test the connection.
## One-Click Connection with OpenMCP
In the [[first-mcp|Your First MCP]] example, we declared three functions to serve as an MCP `tool`, `resource`, and `prompt`. Launching them in OpenMCP is easy: click the OpenMCP icon in the top-right corner to connect:
![](./images/connect-simple.png)
Once logged in, if you see a success message like below, your MCP server is now running and connected properly:
![](./images/connect-success.png)
## STDIO Connection (Auto-Start)
If you're using **STDIO** as the connection method, OpenMCP offers one-click auto-start support. You don't need to manually run the MCP process—OpenMCP will handle launching and shutting it down.
Currently supported languages and their launch configurations:
| Language | Launch Command | Root Directory Determination |
| -------- | ------------------------ | ----------------------------------------- |
| Python | `uv run mcp run ${file}` | First parent folder with `pyproject.toml` |
| Node.js | `node ${file}` | First parent folder with `package.json` |
| Go | `go run ${file}` | First parent folder with `go.mod` |
## SSE & Streamable HTTP Connection
For **SSE (Server-Sent Events)** and **Streamable HTTP** remote connections, OpenMCP can't auto-start your server, since the port and host may be defined in hidden config files or environment variables. In these cases, **you must manually configure the connection.**
To do this, open the **OpenMCP** plugin from the left sidebar in your VLE and go to **"MCP Connections (Workspace)"**. Click the "+" to create a new connection:
![](./images/add-connection.png)
Select the type of communication method:
![](./images/select-server-type.png)
Then input your MCP Server's endpoint address:
![](./images/connect-sse.png)
:::info
Note that different transports generally use different endpoints. Most current MCP servers follow these conventions:

- A server started with SSE uses `/sse` as the default endpoint, e.g. `http://localhost:8001/sse`.
- A server started with Streamable HTTP uses `/mcp` as the default endpoint, e.g. `http://localhost:8001/mcp`.

An MCP server may of course expose two different endpoints and support both transports at the same time; this is especially useful if you want to migrate to Streamable HTTP but cannot drop SSE in the short term.
:::

## OpenMCP Plugin Control Panel
On the left sidebar of your VLE, you'll find the OpenMCP icon. Clicking it opens the control panel:
![](./images/openmcp-control-panel.png)
Previously connected MCP servers for the current workspace are listed here. That's because OpenMCP stores connection details in files like:
```
.openmcp/tabs.{server-name}.json
```
Where `{server-name}` is the name of the connected MCP server.

:::warning
Note that within the same project you should not have two MCP servers with exactly the same name, as this causes conflicts in the `.openmcp/tabs.{server-name}.json` connection storage and leads to unknown errors.
:::
If you'd like to use an MCP server **across all workspaces**, consider adding it to the **"Installed MCP Servers"** section—those servers are globally accessible.
Lastly, check out the **"Getting Started & Help"** section for reference materials to guide your journey.

# Debugging Tools, Resources, and Prompts
## Tabs
OpenMCP uses tabs as the smallest unit for debugging items. Click the "+" button in the navigation bar to create a new tab. The basic usage of Tools, Resources, and Prompts in OpenMCP is similar to the Inspector, but OpenMCP automatically initializes the resource list on the left—a step that requires manual effort in the Inspector.
## Auto-Saving Debug Content
OpenMCP features automatic saving of test results. The following actions will trigger OpenMCP to save the tab and its contents:
- Creating a tab and selecting a valid debugging item.
- Performing debugging actions on the debug page (selecting tools, executing tools, querying the large model, etc.).
The test data for the current MCP project is saved in `.openmcp/tabs.{server-name}.json`, where `{server-name}` corresponds to the successfully connected server's name.
:::warning
Note: Within the same project, you should not have two MCP servers with identical names. This will cause conflicts in the `.openmcp/tabs.{server-name}.json` connection information storage, leading to undefined errors.
:::
## Quick Debugging
During debugging, it's common for the large model to provide unsatisfactory responses due to tool errors. To quickly identify whether a tool is the culprit, click the small airplane icon at the bottom:
![](./images/llm-fast-debug.png)
After clicking, OpenMCP will create a new test tool project and automatically populate the form on the right with the parameters the large model used:
![](./images/llm-fast-debug-result.png)
All you need to do is click "Run" to confirm or rule out a potential error.
## Pydantic Support
When creating tools using Python's `fastmcp`, you have two ways to declare interface types:
1. Using Python's built-in `typing` library for complex data structures.
2. Using `pydantic` to define complex variables. Here's an example:
```python
from mcp.server.fastmcp import FastMCP
from pydantic import BaseModel, Field
from typing import Optional, Union, List, NamedTuple

mcp = FastMCP("Jinhui's MCP Server", version="11.45.14")

class PathParams(BaseModel):
    start: str
    end: str

@mcp.tool(name="test", description="Test tool")
def test(
    params: PathParams,
    test1: str,
    test2: Union[str, List[str]] = Field("", description="Test parameter 2"),
    test3: Optional[str] = Field(None, description="Test parameter 3")
):
    return [test1, test2, test3, params]
```
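For comparison, here is a sketch of a similar tool declared with plain `typing` annotations only; the tool name and parameters are illustrative:

```python
from typing import Optional, Union, List

from mcp.server.fastmcp import FastMCP

mcp = FastMCP('typing-demo')

@mcp.tool(name='test_typing', description='The same interface, using plain typing')
def test_typing(
    test1: str,
    test2: Union[str, List[str]] = '',
    test3: Optional[str] = None,
):
    return [test1, test2, test3]
```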
Since we've implemented internal conversion for both declaration methods, OpenMCP supports both. Notably, if you declare a variable as an object (e.g., `PathParams` above), OpenMCP's tool debug window will generate an "Object Input Field" with basic format validation and auto-completion:
![](./images/object-input.png)
:::info What is an "object"?
Here, an "object" refers to the JavaScript concept—serializable data types excluding primitives. For example, `{ "name": "helloworld" }` is an object. In Python, this is more akin to a `dict` or `namedTuple`.
:::
:::warning
Although OpenMCP supports as many cases as possible, in production scenarios we still recommend not defining MCP tool parameters as objects; sticking to simple data types also improves the stability of tool calls by the large model.
:::

# Distributing Your Experiment Results
## Tab Restoration
OpenMCP automatically saves your experiment results in real-time. For each active server in the workspace, results are stored in `.openmcp/tabs.{server-name}.json`, where `{server-name}` matches the successfully connected MCP server's name.
Ensure your `.gitignore` file doesn't contain rules excluding the `.openmcp` folder. This way, when you:
- Commit code via Git
- Manage agent code
- Clone the project on another machine
- Share the project with others
...you can instantly restore your previous experiment context to continue development or debugging.
## Connection Recovery
Each MCP server's connection details are saved in `.openmcp/connection.json`. Here is an excerpt showing a single connection entry:
```json
{
    "clientVersion": "0.0.1",
    "env": {},
    "serverInfo": {
        "name": "Jinhui's MCP Server",
        "version": "1.9.2"
    },
    "filePath": "{workspace}/simple-mcp/main.py",
    "name": "Jinhui's MCP Server",
    "version": "1.9.2"
}
```
When you:
- Open the left control panel, or
- Access a previously connected MCP server
OpenMCP automatically uses this information to:
1. Retrieve the workspace's server list
2. Attempt automatic reconnection
If OpenMCP encounters initialization or save errors during connection, you can:
1. Contact OpenMCP official support, or
2. Manually manage the `.openmcp/connection.json` file

# Connecting Multiple MCP Servers
OpenMCP supports connecting to multiple MCP servers simultaneously.
For example, if you want to create an agent that can automatically research topics and compile Word documents, you could:
1. Find an MCP for web searching: [crawl4ai MCP](https://github.com/LSTM-Kirigaya/openmcp-tutorial/tree/main/crawl4ai-mcp)
2. Find an MCP for Word operations: [Office-Word-MCP-Server](https://github.com/GongRzhe/Office-Word-MCP-Server)
3. Combine them in OpenMCP
4. Automate your task!
Assuming we've already connected the first MCP (crawl4ai), here's how to add additional servers:
![](./images/add-new-mcp.png)
## Method 1: Drag and Drop
Simply shift-click and drag the MCP server file into the OpenMCP interface to auto-fill connection parameters:
![](./images/drag-to-fill.png)
:::warning
Auto-filled commands may not always be accurate. As mentioned in [STDIO Connection Setup](http://localhost:5173/openmcp/plugin-tutorial/usage/connect-mcp.html#stdio-%E8%BF%9E%E6%8E%A5%E7%9A%84%E5%90%AF%E5%8A%A8), please review [Appendix: What You Must Know About UV MCP Startup](http://localhost:5173/openmcp/plugin-tutorial/quick-start/first-mcp.html#%E9%99%84%E5%BD%95-%E5%85%B3%E4%BA%8E-uv-%E5%90%AF%E5%8A%A8-mcp-%E4%BD%A0%E5%BF%85%E9%A1%BB%E7%9F%A5%E9%81%93%E7%9A%84) to verify connection methods.
:::
## Method 2: Manual Parameter Entry
Manually enter connection parameters (self-explanatory).
## Using Multiple Servers
Using multiple servers works much like using a single server: OpenMCP automatically handles tool routing and selection internally. The one thing to watch is that tool names must be unique across servers, otherwise they will conflict.
If you have a use case requiring duplicate tool names, please describe your scenario via [GitHub issue](https://github.com/LSTM-Kirigaya/openmcp-client/issues). We'll evaluate supporting this based on community discussion.

# Implementing OAuth Authentication for MCP Servers
When using **SSE** or **Streamable HTTP** connections, implementing authentication mechanisms is recommended for enhanced security. MCP officially recommends using the OAuth protocol. This guide demonstrates how to debug OAuth-authenticated APIs using openmcp-client, using GitHub user information as an example.
## 1. Obtain GitHub OAuth Credentials
To access GitHub user APIs, you'll need to create an OAuth application:
1. Visit [GitHub Developers](https://github.com/settings/developers)
2. Click `New OAuth App`
3. Enter any application name
4. Set `Homepage URL` to `http://localhost:8000`
5. Set `Authorization callback URL` to `http://localhost:8000/github/callback`
6. Click `Register application`
![](images/oauth-github-new-application.png)
After registration:
- Save your `Client ID`
- Click `Generate a new client secret` (note: secrets are only visible once at generation)
## 2. Configure Environment Variables
Set your credentials as environment variables:
::: code-group

```bash [bash]
export MCP_GITHUB_CLIENT_ID={{Client ID}}
export MCP_GITHUB_CLIENT_SECRET={{secret}}
```

```powershell [PowerShell]
$env:MCP_GITHUB_CLIENT_ID = "your_id"
$env:MCP_GITHUB_CLIENT_SECRET = "your_secret"
```

```cmd [CMD]
set MCP_GITHUB_CLIENT_ID={{Client ID}}
set MCP_GITHUB_CLIENT_SECRET={{secret}}
```

:::

> Note: Do not add quotes when setting environment variables in CMD.
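Before launching the server, you can quickly verify that the variables are visible to Python; this check assumes the variable names used above:

```python
import os

for key in ('MCP_GITHUB_CLIENT_ID', 'MCP_GITHUB_CLIENT_SECRET'):
    print(key, 'set' if os.environ.get(key) else 'MISSING')
```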
## 3. Clone Reference Implementation
Deploy an OAuth-enabled MCP server using the [official Python example](https://github.com/modelcontextprotocol/python-sdk/tree/main/examples/servers/simple-auth):
```bash
git clone https://github.com/modelcontextprotocol/python-sdk/
cd python-sdk/examples/servers/simple-auth
```
## 4. Launch MCP Server
1. Create a virtual environment
2. Install dependencies
3. Run with `uv` or `python main.py`
> Important: Environment variables must be set first to avoid `2 validation errors for ServerSettings`
## 5. Connect with openmcp-client
Connect to your server via web or VSCode:
1. Click "+" to add connection
2. Set URL based on server's transport:
- SSE: `http://localhost:8000/sse`
- Streamable HTTP: `http://localhost:8000/mcp`
3. Leave auth signature blank
On first connection, a browser window will open for OAuth authentication. After successful auth, the window will automatically close.
![](images/oauth-github-success.png)
Once authenticated, access the `get_user_profile` tool to retrieve your GitHub profile:
![](images/oauth-github-tool.png)

# Testing Your MCP with Large Language Models
After completing [[connect-llm|connecting your MCP server]], you're ready to begin testing. While [[put-into-llm|quick start]] covered basic testing, this article details advanced configuration options visible below the input box.
## Model Selection
Switch between different LLMs here. OpenMCP tracks models at the message level, enabling mixed-model testing scenarios.
> Can't find your model? See [[connect-llm|connecting MCP servers]] to add new models.
## System Prompts
Manage and create system prompts through this module:
![](./images/system-prompt.png)
Prompts are stored in `~/.openmcp/nedb/systemPrompt.db` (NeDB format for easy inspection/backup).
## Prompt Generation
Invoke server-provided prompt functions, with results inserted as rich text:
![](./images/prompt.png)
## Resource Access
Call server resource functions - outputs are inserted as formatted content:
![](./images/resource.png)
:::warning Data Persistence
OpenMCP doesn't manage resource persistence! Empty resources after restart indicate the MCP server lacks storage implementation - this is expected behavior.
:::
## Parallel Tool Execution
When enabled (default), models may call multiple tools in one response (e.g., three parallel web searches):
![](./images/parallel-tool-call.png)
Disable for sequential execution.
> Note: Some providers (like Gemini) may force-disable this if they don't fully support OpenAI's parallel calling spec.
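For a concrete picture, under the OpenAI-compatible protocol a single assistant turn carries parallel calls in a `tool_calls` array, roughly like this (the values are illustrative):

```python
assistant_message = {
    'role': 'assistant',
    'content': None,
    'tool_calls': [
        {'id': 'call_1', 'type': 'function',
         'function': {'name': 'web_search', 'arguments': '{"query": "page 1"}'}},
        {'id': 'call_2', 'type': 'function',
         'function': {'name': 'web_search', 'arguments': '{"query": "page 2"}'}},
    ],
}
```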
## Temperature Control
Controls output randomness:
- Recommended range: 0.6-0.7 for general tasks
- Default: 0.6 (balanced creativity/consistency)
## Context Window
Determines how many prior messages (default: 20) are sent with each request. Count includes:
- User queries
- Model responses
- Tool calls/results
:::danger Minimum Threshold
Values below 20 often break tool call sequencing, causing 400 errors. Start fresh conversations if this occurs.
:::
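Conceptually, the truncation is as simple as keeping the tail of the message history; a sketch, not OpenMCP's actual code:

```python
def build_request_messages(history: list, context_length: int = 20) -> list:
    # `history` mixes user queries, model responses, and tool calls/results;
    # only the most recent `context_length` entries are sent to the model.
    return history[-context_length:]
```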
## Server Timeout
Default 30-second timeout adjustable via Settings → General (global configuration, in seconds). Increase for long-running operations.

# UI Theme Colors
## OpenMCP Theme Follows VS Code
OpenMCP's theme color automatically follows your current VS Code theme. If you want to change the appearance of OpenMCP, simply switch the theme in VS Code.
For example, when switching to the popular community theme **One Dark Pro**, OpenMCP will adapt accordingly:
![](./images/one-dark-pro.png)
## Switching Theme Colors
You can change OpenMCP's theme color here (the default is pink):
![](./images/change-color.png)
## Special Support for Trae
OpenMCP provides enhanced theme support for **Trae**'s default color schemes. We encourage users to try different visual language environments (VLEs) like VS Code, Cursor, and Trae for the best developer experience.
Most of the examples in the official OpenMCP documentation use **Trae's default "Deep Blue" theme**:
![](./images/trae-blue.png)

# What is MCP?
![](https://picx.zhimg.com/70/v2-1a2df8a081a76f4e90431d8a2445f495_1440w.avis)
MCP (Model Context Protocol) is an open protocol designed to standardize how applications provide context to large language models (LLMs). You can think of MCP as a Type-C interface for AI applications. Just as Type-C provides a standardized way to connect your devices to various peripherals and accessories, MCP provides a standardized way to connect AI models to different data sources and tools.
The MCP protocol was introduced by Anthropic in late November 2024:
- Official documentation: [Introduction](https://modelcontextprotocol.io/introduction)
- GitHub repository: [github.com/modelcontextprotocol](https://github.com/modelcontextprotocol)
## Why do we need MCP?
We all know that from the initial ChatGPT, to later Cursor, Copilot Chatroom, and now the well-known agents, if we observe from the user interaction perspective, we can see the following evolution in current LLM products:
```mermaid
graph LR
a(chatbot > deepseek, chatgpt) --> b(composer > cursor, copilot) --> c(agent > AutoGPT, Manus, Open Manus)
```
- chatbot
    - Programs that only chat.
    - Workflow: you input a question, it provides a solution, but you need to execute it yourself.
    - Representative works: deepseek, chatgpt
- composer
    - Interns that can help with work, but limited to coding.
    - Workflow: you input a question, it generates code to solve the problem and automatically fills it into the editor's compilation area; you just need to review and confirm.
    - Representative works: cursor, copilot
- agent
    - Personal assistants.
    - Workflow: you input a question, it generates a solution, and after obtaining your consent, executes it automatically.
    - Representative works: AutoGPT, Manus, Open Manus
To implement agents, we need to enable LLMs to flexibly operate all software and even physical world robots, which requires defining a unified context protocol and workflow. MCP (Model Context Protocol) is the fundamental protocol born to solve this problem. An intuitive understanding is as follows:
```mermaid
graph TB
user(User)
ai(AI Software)
llm(Large Language Model)
computer(Local Environment)

user --Help me organize the AI-related consultations from the Two Sessions report into a Word file--> agent

subgraph agent
computer <--MCP--> ai
end

agent --> word(D:/Meeting Summary/Two Sessions Report AI Topic.docx)
```
:::info
Anthropic's explanation for the necessity of MCP: MCP helps you build agents and complex workflows on top of LLMs. LLMs often need to integrate with data and tools, and MCP provides the following support:
- A growing collection of pre-built integrations that your LLM can directly connect to.
- Flexibility to switch between LLM providers and vendors.
- Best practices for protecting data within infrastructure.
:::
## Overall Architecture
The core of MCP adopts a client-server architecture where a host can connect to multiple servers:
```mermaid
graph LR
host[Host MCP Client\nBrowser, Code Editor, Other Tools]

server_a[MCP Server A]
server_b[MCP Server B]
server_c[MCP Server C]

db_a[(Local\nData Source A)]
db_b[(Local\nData Source B)]

remote[(Remote Service C)]

subgraph Your Computer
direction LR
host <--MCP Protocol--> server_a
host <--MCP Protocol--> server_b
host <--MCP Protocol--> server_c
server_a <--> db_a
server_b <--> db_b
end

subgraph Internet
server_c <--Web APIs--> remote
end
```
- MCP Hosts: Programs that want to access data through MCP, such as Claude Desktop, Integrated Development Environments (IDEs), or other AI tools.
- MCP Clients: Protocol clients that maintain a 1:1 connection with servers, responsible for communicating with MCP servers.
- MCP Servers: Lightweight programs, each exposing specific functionality through the standardized Model Context Protocol.
- Local Data Sources: Computer files, databases, and services that MCP servers can securely access.
- Remote Services: External systems that MCP servers can connect to via the internet (e.g., services accessed through APIs).
## MCP Workflow
In terms of workflow, MCP is very similar to LSP. In fact, like LSP, current MCP also transmits data based on [JSON-RPC 2.0](https://link.zhihu.com/?target=https%3A//www.jsonrpc.org/specification) (via Stdio or SSE). Those who have developed LSP should find MCP very intuitive. I'll use several simple and clear sequence diagrams to help everyone understand how this works.
### Initialization
Assuming our software already supports MCP clients, when the software starts, it goes through the following steps:
```mermaid
graph TB
subgraph MCP Client
A1[Initialization]
A2[Get the tool collection provided by the MCP server\n1. Create file -> createFile\n2. Delete file -> deleteFile\n3. Use search engine -> useBrowser\n4. ...]
end
subgraph MCP Server
B1[Initialization]
end
A1 --startMCPServer--> B1
B1 --ListToolsRequestSchema--> A2
```
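On the wire this step is a plain JSON-RPC 2.0 exchange. Here is a sketch of the message shapes, with illustrative field values:

```python
# Request sent by the MCP client after initialization:
list_tools_request = {
    'jsonrpc': '2.0',
    'id': 1,
    'method': 'tools/list',
}

# Typical shape of the server's response:
list_tools_response = {
    'jsonrpc': '2.0',
    'id': 1,
    'result': {
        'tools': [{
            'name': 'createFile',
            'description': 'Create a file on disk',
            'inputSchema': {'type': 'object', 'properties': {'path': {'type': 'string'}}},
        }],
    },
}
```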
### Workflow
Suppose you are a C language engineer and want an agent to automatically compile a project. The execution process would be as follows:
```mermaid
graph TB
subgraph MCP Client
A1[User asks: Please help me delete all intermediate compilation results under build] --> A2[Send user query, resources, and MCP server tool collection to the LLM]
A3[LLM returns operation flow\n1. deleteFile build/a.o\n2. deleteFile build/b.o]
A4[Organize operation results for the LLM]
A5[Display final response to the user]
end
subgraph MCP Server
B1[Parse LLM operation flow] --Request user consent--> B2[Execute operation flow]
end
subgraph LLM
C1[LLM generates an operation plan based on context]
C2[LLM generates a natural language response based on all information]
end
A3 --> B1
A4 --> C2
C2 --> A5
```
## Open Source Ecosystem
Like LSP, which has many client and server frameworks in the open source community, MCP is the same. Currently, Anthropic has open-sourced an MCP server framework: https://github.com/modelcontextprotocol/servers. Friends who want to explore LLM applications can freely use this framework. This repository also includes many officially recognized MCP servers that can serve as learning references.
In addition, there are many MCP clients and servers developed by the open source community on pulsemcp: https://www.pulsemcp.com/clients

---
# https://vitepress.dev/reference/default-theme-home-page
layout: home

hero:
  name: "OpenMCP"
  text: "An MCP debugger and SDK for elegant developers"
  tagline: Shortening the last mile from large language models to agents
  actions:
    - theme: brand
      text: OpenMCP Plugin
      link: ./plugin-tutorial
    - theme: alt
      text: openmcp-sdk
      link: ./sdk-tutorial
    - theme: alt
      text: GitHub
      link: https://github.com/LSTM-Kirigaya/openmcp-client

features:
  - icon:
      src: /images/icons/vscode.svg
      height: 48px
      alt: Integrated debugging environment
    title: Integrated Debugging Environment
    details: Combines an inspector with MCP client capabilities for seamless development and testing
  - icon:
      src: /images/icons/openmcp-edge.svg
      height: 48px
      alt: Complete project-level control panel
    title: Comprehensive Project Management
    details: Provides a complete project-level control panel for efficient MCP project oversight
  - icon:
      src: /images/icons/openmcp-sdk.svg
      height: 48px
      alt: Complete deployment solution
    title: Complete Deployment Solution
    details: Deploy tested agents to your application or server via openmcp-sdk
---
<br><br>
<h2 id="home-0">
为您的 MCP Agent 开发排忧解难
<br>
<span>Providing Fun and Convenience for Your MCP Agent Development</span>
</h2>
<BiliPlayer
url="//player.bilibili.com/player.html?isOutside=true&aid=114654638511901&bvid=BV1MFTBzpEtZ&cid=30412178228&p=1"
cover="https://picx.zhimg.com/80/v2-ed6a7eb80dfeb2f188f11d89ca6c4b5a_1440w.png"
/>
<br>
<h2 id="home-1">
OpenMCP 为谁准备?
<br>
<span>The Development of OpenMCP is for ...</span>
</h2>
<br>
<KTab class="home-tab">
<TwoSideLayout
    label="Professional Software Engineers"
    :texts="[
        'Shift testing left: development and testing in one place, with no third-party software to open, plus an extremely rich set of features.',
        'Manage, debug, and test your agents freely and elegantly in the left panel.',
        'Every detail of how the large model calls each tool is laid out in full; unsatisfactory call results can be reproduced with one click.',
        'Every conversation displays performance metrics for convenient cost management.',
        'A system prompt management panel lets you easily build agent applications from MCP servers and system prompts.',
    ]"
    image="./images/openmcp.chatbot.png"
/>
<TwoSideLayout
    label="Open Source Community Enthusiasts"
    :texts="[
        'Shift testing left: development and testing in one place, with no third-party software to open, plus an extremely rich set of features.',
        'OpenMCP is fully open source. You can not only use the product for free, but also join us to realize your own ideas about agents.',
        'All technical details are fully public, so you never need to worry about your ideas or tokens being plagiarized.',
        'A persistable system prompt management panel lets you test system prompts against real MCP servers and share them within the community.',
        'Every test detail is 100% version-controlled with git, making it easy to share each of your experiment results and to reproduce other MCP projects at zero cost.'
    ]"
    image="./images/opensource.png"
/>
<TwoSideLayout
    label="AI Research Scientists"
    :texts="[
        'Shift testing left: development and testing in one place, with no third-party software to open, plus an extremely rich set of features.',
        'With just a few lines of code, turn your research results into an MCP server and connect it to any large model for a user-friendly interface.',
        'All experiment data and configuration parameters are automatically tracked by git, keeping research results traceable and reproducible for academic exchange and paper replication.',
        'Build your demo quickly on top of OpenMCP and shorten the distance from innovation to delivery.',
    ]"
    image="./images/openmcp.chatbot.png"
/>
</KTab>
<br>
<h2 id="home-2">
问题解答
<br>
<span>Waiting for Your Questions</span>
</h2>
<el-collapse>
<el-collapse-item title="What is OpenMCP good for?" name="1">
As its name suggests, OpenMCP is an MCP debugger and SDK for developers, dedicated to lowering the full-chain development cost of AI agents and the mental load on developers. Our task and mission is to use OpenMCP to build MCP tools that solve problems in real-life scenarios and shorten working hours, to help engineers and research scientists deliver demos faster, and to make this vision visible to the public.
</el-collapse-item>
<el-collapse-item title="Is OpenMCP free?" name="2">
Yes, OpenMCP is fully open source. You can not only use the product for free, but also join us to realize your own ideas about agents. OpenMCP's mission is to build an ecosystem around MCP. We believe MCP development will remain highly customized work for some time, so the current focus is not to rush out an agent that appears to do everything, but to build the related ecosystem and infrastructure step by step.
</el-collapse-item>
<el-collapse-item title="What is OpenMCP not suited for?" name="3">
If you are trying to use OpenMCP to develop a general-purpose AI agent that can do everything, you should invest all your money in quantum computing research instead of opening this website. Remember: in this era, building an all-domain general AI agent converges, with high probability, to telecom fraud.
</el-collapse-item>
<el-collapse-item title="Who develops OpenMCP?" name="4">
<p>OpenMCP was initially led by LSTM-Kirigaya (Jinhui) as an MCP testing tool for 3D-related work. Its main contributors are employees of major tech companies, students in computer-related majors, and active contributors from the open source community.</p>
<p>Identity is not important. A line I am very fond of, for you: "Don't tell me whether you can; tell me whether you like it."</p>
<img src="https://pica.zhimg.com/80/v2-3666e84b2f92bf444a5eb64fb9d08e71_1440w.png" style="max-width: 500px;margin-top:10px;"/>
</el-collapse-item>
<el-collapse-item title="How can I join or take part in discussions?" name="5">
See <a href="https://kirigaya.cn/openmcp/preview/join.html" target="_blank">Join OpenMCP</a> to learn how to participate in OpenMCP's maintenance and development, and the <a href="https://kirigaya.cn/openmcp/preview/channel.html" target="_blank">resource channel</a> for our contact information. There are currently three main communities: QQ group 782833642, the <a href="https://discord.com/invite/SKTZRf6NzU" target="_blank">OpenMCP Discord channel</a>, and the <a href="https://www.zhihu.com/ring/host/1911121615279849840" target="_blank">Zhihu circle "OpenMCP Museum"</a>.
</el-collapse-item>
<el-collapse-item title="How do I contact you for collaboration?" name="6">
For collaboration, please contact Jinhui's personal email: 1193466151@qq.com
</el-collapse-item>
</el-collapse>
