finish new agent framework

This commit is contained in:
锦恢 2025-06-20 02:36:53 +08:00
parent 66f9b128f8
commit f51bd364b4
3 changed files with 482 additions and 5 deletions


@ -284,7 +284,7 @@ export class TaskLoop {
    /**
     * @description Start the loop and asynchronously update the DOM
     */
-   start(tabStorage: any, userMessage: string): Promise<void>;
+   start(tabStorage: ChatStorage, userMessage: string): Promise<void>;
    /**
     * @description Create single conversation context


@ -1,3 +1,5 @@
+ import * as fs from 'fs';
  import { WebSocket } from 'ws';
  import { EventEmitter } from 'events';
  import { routeMessage } from '../common/router.js';
@ -27,6 +29,10 @@ export interface IConnectionArgs {
      cwd?: string;
      url?: string;
      oauth?: string;
+     env?: {
+         [key: string]: string;
+     };
+     [key: string]: any;
  }
  // Listener callback type
@ -93,7 +99,7 @@ export class TaskLoopAdapter {
        // By default, forward received messages to routeMessage
        this.onDidReceiveMessage((message) => {
            const { command, data } = message;
            switch (command) {
                case 'nodejs/launch-signature':
                    this.postMessage({
@ -108,20 +114,20 @@ export class TaskLoopAdapter {
                case 'nodejs/update-connection-signature':
                    // In SDK mode, connection parameters do not need to be saved automatically
                    break;
                default:
                    routeMessage(command, data, this);
                    break;
            }
        });
    }
    /**
     * @description Post a message to the service-side message channel.
     * @param message - command args
     */
    public postMessage(message: WebSocketMessage): void {
        this.emitter.emit('message/service', message);
    }
@ -152,3 +158,171 @@ export class TaskLoopAdapter {
    }
}
interface StdioMCPConfig {
    command: string;
    args: string[];
    env?: {
        [key: string]: string;
    };
    description?: string;
    prompt?: string;
}

interface HttpMCPConfig {
    url: string;
    type?: string;
    env?: {
        [key: string]: string;
    };
    description?: string;
    prompt?: string;
}

export interface OmAgentConfiguration {
    version: string;
    mcpServers: {
        [key: string]: StdioMCPConfig | HttpMCPConfig;
    };
    defaultLLM: {
        baseURL: string;
        apiToken: string;
        model: string;
    };
}

export interface OmAgentStartOption {
}
import { MessageState, type ChatMessage, type ChatSetting, type TaskLoop, type TextMessage } from '../../task-loop.js';

export function UserMessage(content: string): TextMessage {
    return {
        role: 'user',
        content,
        extraInfo: {
            created: Date.now(),
            state: MessageState.None,
            serverName: '',
            enableXmlWrapper: false
        }
    };
}

export function AssistantMessage(content: string): TextMessage {
    return {
        role: 'assistant',
        content,
        extraInfo: {
            created: Date.now(),
            state: MessageState.None,
            serverName: '',
            enableXmlWrapper: false
        }
    };
}
export class OmAgent {
    public _adapter: TaskLoopAdapter;
    public _loop?: TaskLoop;

    constructor() {
        this._adapter = new TaskLoopAdapter();
    }

    /**
     * @description Load MCP configuration from file.
     * Supports multiple MCP backends and a default LLM model configuration.
     *
     * @example
     * Example configuration:
     * {
     *   "version": "1.0.0",
     *   "mcpServers": {
     *     "openmemory": {
     *       "command": "npx",
     *       "args": ["-y", "openmemory"],
     *       "env": {
     *         "OPENMEMORY_API_KEY": "YOUR_API_KEY",
     *         "CLIENT_NAME": "openmemory"
     *       },
     *       "description": "A MCP for long-term memory support",
     *       "prompt": "You are a helpful assistant."
     *     }
     *   },
     *   "defaultLLM": {
     *     "baseURL": "https://api.openmemory.ai",
     *     "apiToken": "YOUR_API_KEY",
     *     "model": "deepseek-chat"
     *   }
     * }
     *
     * @param configPath - Path to the configuration file
     */
    public loadMcpConfig(configPath: string) {
        const config = JSON.parse(fs.readFileSync(configPath, 'utf-8')) as OmAgentConfiguration;
        const { mcpServers, defaultLLM } = config;

        for (const key in mcpServers) {
            const mcpConfig = mcpServers[key];
            if ('command' in mcpConfig) {
                // STDIO server: join command and args into a single command string
                const commandString = (
                    mcpConfig.command + ' ' + mcpConfig.args.join(' ')
                ).trim();
                this._adapter.addMcp({
                    commandString,
                    connectionType: 'STDIO',
                    env: mcpConfig.env,
                    description: mcpConfig.description,
                    prompt: mcpConfig.prompt,
                });
            } else {
                // HTTP server: 'http' selects streamable HTTP, anything else falls back to SSE
                const connectionType: ConnectionType = mcpConfig.type === 'http' ? 'STREAMABLE_HTTP' : 'SSE';
                this._adapter.addMcp({
                    url: mcpConfig.url,
                    env: mcpConfig.env,
                    connectionType,
                    description: mcpConfig.description,
                    prompt: mcpConfig.prompt,
                });
            }
        }
    }

    public async getLoop() {
        if (this._loop) {
            return this._loop;
        }
        const adapter = this._adapter;
        const { TaskLoop } = await import('../../task-loop.js');
        this._loop = new TaskLoop({ adapter, verbose: 1 });
        return this._loop;
    }

    public async start(
        messages: ChatMessage[] | string,
        settings?: ChatSetting
    ) {
        // .length works for both the string and the array form of messages
        if (messages.length === 0) {
            throw new Error('messages is empty');
        }

        const loop = await this.getLoop();
        const storage = await loop.createStorage(settings);

        let userMessage: string;
        if (typeof messages === 'string') {
            userMessage = messages;
        } else {
            const lastMessageContent = messages.at(-1)?.content;
            if (typeof lastMessageContent === 'string') {
                userMessage = lastMessageContent;
            } else {
                throw new Error('last message content is undefined');
            }
        }

        return await loop.start(storage, userMessage);
    }
}
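For reference, a minimal usage sketch of the new OmAgent API; the import path and config file path are placeholders, not part of this commit, and the config file is assumed to look like the example in the loadMcpConfig docstring above:

import { OmAgent, UserMessage } from './sdk.js'; // hypothetical entry point

async function main() {
    const agent = new OmAgent();

    // Register MCP servers and the default LLM from a JSON config file.
    agent.loadMcpConfig('./mcpconfig.json'); // placeholder path

    // start() accepts either a plain string or a ChatMessage[] whose last
    // entry carries the user's message; UserMessage builds such an entry.
    await agent.start([UserMessage('Summarize my stored notes.')]);
}

main();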

service/task-loop.d.ts (vendored, new file, 303 additions)

@ -0,0 +1,303 @@
/* eslint-disable */
import type { OpenAI } from 'openai';

export type ChatCompletionChunk = OpenAI.Chat.Completions.ChatCompletionChunk;
export type ChatCompletionCreateParamsBase = OpenAI.Chat.Completions.ChatCompletionCreateParams & { id?: string };

interface SchemaProperty {
    title: string;
    type: string;
    description?: string;
}

interface InputSchema {
    type: string;
    properties: Record<string, SchemaProperty>;
    required?: string[];
    title?: string;
    $defs?: any;
}

interface ToolItem {
    name: string;
    description: string;
    inputSchema: InputSchema;
    enabled: boolean;
    anyOf?: any;
}

interface IExtraInfo {
    created: number;
    state: MessageState;
    serverName: string;
    usage?: ChatCompletionChunk['usage'];
    enableXmlWrapper: boolean;
    [key: string]: any;
}

interface ToolMessage {
    role: 'tool';
    index: number;
    content: ToolCallContent[];
    tool_call_id?: string;
    name?: string; // tool name, set when role is 'tool'
    tool_calls?: ToolCall[];
    extraInfo: IExtraInfo;
}

interface TextMessage {
    role: 'user' | 'assistant' | 'system';
    content: string;
    tool_call_id?: string;
    name?: string; // tool name, set when role is 'tool'
    tool_calls?: ToolCall[];
    extraInfo: IExtraInfo;
}

export type ChatMessage = ToolMessage | TextMessage;

interface ChatStorage {
    messages: ChatMessage[];
    settings: ChatSetting;
}

interface EnableToolItem {
    name: string;
    description: string;
    enabled: boolean;
    inputSchema: InputSchema;
}

export type Ref<T> = {
    value: T;
};

export interface ToolCall {
    id?: string;
    index?: number;
    type: string;
    function: {
        name: string;
        arguments: string;
    };
}

export interface ToolCallContent {
    type: string;
    text: string;
    [key: string]: any;
}

export interface ToolCallResult {
    state: MessageState;
    content: ToolCallContent[];
}
export enum MessageState {
    ServerError = 'server internal error',
    ReceiveChunkError = 'receive chunk error',
    Timeout = 'timeout',
    MaxEpochs = 'max epochs',
    Unknown = 'unknown error',
    Abort = 'abort',
    ToolCall = 'tool call failed',
    None = 'none',
    Success = 'success',
    ParseJsonError = 'parse json error'
}

export interface IErrorMssage {
    state: MessageState;
    msg: string;
}

export interface IDoConversationResult {
    stop: boolean;
}
export interface TaskLoopOptions {
    /**
     * The maximum number of epochs (conversation rounds) to perform.
     */
    maxEpochs?: number;
    /**
     * The maximum number of retries allowed when parsing JSON responses fails.
     */
    maxJsonParseRetry?: number;
    /**
     * A custom adapter that can be used to modify behavior or integrate with different environments.
     */
    adapter?: any;
    /**
     * Verbosity level for logging:
     * 0 - Silent, 1 - Errors only, 2 - Warnings and errors, 3 - Full debug output.
     */
    verbose?: 0 | 1 | 2 | 3;
}

interface ChatSetting {
    /**
     * Index of the selected language model from the list of available models.
     */
    modelIndex: number;
    /**
     * System-level prompt used to guide the behavior of the assistant.
     */
    systemPrompt: string;
    /**
     * List of tools that are enabled and available during the chat.
     */
    enableTools: EnableToolItem[];
    /**
     * Sampling temperature for generating responses.
     * Higher values (e.g., 0.8) make output more random; lower values (e.g., 0.2) make it more focused and deterministic.
     */
    temperature: number;
    /**
     * Whether web search is enabled for enhancing responses with real-time information.
     */
    enableWebSearch: boolean;
    /**
     * Maximum length of the conversation context to keep.
     */
    contextLength: number;
    /**
     * Whether multiple tools can be called in parallel within a single message.
     */
    parallelToolCalls: boolean;
    /**
     * Whether to wrap tool call responses in XML format.
     */
    enableXmlWrapper: boolean;
}
/**
 * @description Task loop that drives multi-epoch agent conversations.
 */
export class TaskLoop {
    constructor(taskOptions?: TaskLoopOptions);
    /**
     * @description Build the chat-completion request payload from storage.
     * @param tabStorage
     */
    makeChatData(tabStorage: any): ChatCompletionCreateParamsBase | undefined;
    /**
     * @description Stop the task loop.
     */
    abort(): void;
    /**
     * @description Register a callback function triggered on error.
     * @param handler
     */
    registerOnError(handler: (msg: IErrorMssage) => void): void;
    /**
     * @description Register a callback function triggered on each streamed chunk.
     * @param handler
     */
    registerOnChunk(handler: (chunk: ChatCompletionChunk) => void): void;
    /**
     * @description Register a callback function triggered when the loop finishes.
     * @param handler
     */
    registerOnDone(handler: () => void): void;
    /**
     * @description Register a callback function triggered at the beginning of each epoch.
     * @param handler
     */
    registerOnEpoch(handler: () => void): void;
    /**
     * @description Register a callback function triggered when a tool call completes. This allows you to intercept and modify the tool call's output.
     * @param handler
     */
    registerOnToolCalled(handler: (toolCallResult: ToolCallResult) => ToolCallResult): void;
    /**
     * @description Register a callback triggered before a tool call is dispatched. You can intercept and modify the tool call itself.
     * @param handler
     */
    registerOnToolCall(handler: (toolCall: ToolCall) => ToolCall): void;
    /**
     * @description Get the current LLM configuration.
     */
    getLlmConfig(): any;
    /**
     * @description Set the current LLM configuration (for the Node.js environment).
     * @param config
     * @example
     * setLlmConfig({
     *     id: 'openai',
     *     baseUrl: 'https://api.openai.com/v1',
     *     userToken: 'sk-xxx',
     *     userModel: 'gpt-3.5-turbo',
     * })
     */
    setLlmConfig(config: any): void;
    /**
     * @description Set the maximum number of epochs.
     * @param maxEpochs
     */
    setMaxEpochs(maxEpochs: number): void;
    /**
     * @description Bind refs that receive streaming content and tool calls.
     */
    bindStreaming(content: Ref<string>, toolCalls: Ref<ToolCall[]>): void;
    /**
     * @description Not finished yet.
     */
    connectToService(): Promise<void>;
    /**
     * @description Set the proxy server.
     * @param proxyServer
     */
    setProxyServer(proxyServer: string): void;
    /**
     * @description Get the list of all available tools.
     */
    listTools(): Promise<ToolItem[]>;
    /**
     * @description Start the loop and asynchronously update the DOM.
     * @param tabStorage
     * @param userMessage
     */
    start(tabStorage: ChatStorage, userMessage: string): Promise<void>;
    /**
     * @description Create a single conversation context.
     */
    createStorage(settings?: ChatSetting): Promise<ChatStorage>;
}
export declare const getToolSchema: any;
export declare const useMessageBridge: any;
export declare const llmManager: any;
export declare const llms: any;
export declare const pinkLog: any;
export declare const redLog: any;
export declare const ElMessage: any;
export declare const handleToolCalls: any;
export declare const getPlatform: any;
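Taken together, a minimal sketch of driving the declared TaskLoop API directly; the import path and the adapter argument are assumptions (in this commit, TaskLoopAdapter from the SDK file above fills the adapter role):

import { TaskLoop, type Ref, type ToolCall } from './task-loop.js'; // assumed path

async function runOnce(adapter: any): Promise<string> {
    const loop = new TaskLoop({ adapter, maxEpochs: 20, verbose: 1 });

    // Refs the loop fills in while the model streams output.
    const content: Ref<string> = { value: '' };
    const toolCalls: Ref<ToolCall[]> = { value: [] };
    loop.bindStreaming(content, toolCalls);

    loop.registerOnError((err) => {
        console.error(`loop error (${err.state}):`, err.msg);
    });
    loop.registerOnDone(() => {
        console.log('loop finished');
    });

    // One storage object per conversation; start() runs the epochs.
    const storage = await loop.createStorage();
    await loop.start(storage, 'hello');
    return content.value;
}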