diff --git a/examples/agents/experimental/human.ts b/examples/agents/experimental/human.ts
new file mode 100644
index 00000000..5ef8fd38
--- /dev/null
+++ b/examples/agents/experimental/human.ts
@@ -0,0 +1,102 @@
+import "dotenv/config.js";
+import { BeeAgent } from "bee-agent-framework/agents/bee/agent";
+import { createConsoleReader } from "../../helpers/io.js"; // Use the examples console reader
+import { FrameworkError } from "bee-agent-framework/errors";
+import { TokenMemory } from "bee-agent-framework/memory/tokenMemory";
+import { Logger } from "bee-agent-framework/logger/logger";
+import { OpenMeteoTool } from "bee-agent-framework/tools/weather/openMeteo";
+
+// Import the HumanTool from the updated file
+import { HumanTool } from "../../tools/experimental/human.js";
+
+import {
+  BeeSystemPrompt,
+  BeeAssistantPrompt,
+  BeeUserPrompt,
+  BeeUserEmptyPrompt,
+  BeeToolErrorPrompt,
+  BeeToolInputErrorPrompt,
+  BeeToolNoResultsPrompt,
+  BeeToolNotFoundPrompt,
+} from "bee-agent-framework/agents/bee/prompts";
+
+// Set up logger
+Logger.root.level = "silent"; // Disable internal logs
+const logger = new Logger({ name: "app", level: "trace" });
+
+// Initialize LLM (test against llama as requested)
+import { OllamaChatLLM } from "bee-agent-framework/adapters/ollama/chat";
+const llm = new OllamaChatLLM({
+  modelId: "llama3.1",
+});
+
+// Create the console reader once, share it with HumanTool
+const reader = createConsoleReader();
+
+// Initialize BeeAgent with shared reader for HumanTool
+const agent = new BeeAgent({
+  llm,
+  memory: new TokenMemory({ llm }),
+  tools: [new OpenMeteoTool(), new HumanTool(reader)],
+  templates: {
+    system: BeeSystemPrompt,
+    assistant: BeeAssistantPrompt,
+    user: BeeUserPrompt,
+    userEmpty: BeeUserEmptyPrompt,
+    toolError: BeeToolErrorPrompt,
+    toolInputError: BeeToolInputErrorPrompt,
+    toolNoResultError: BeeToolNoResultsPrompt,
+    toolNotFoundError: BeeToolNotFoundPrompt,
+  },
+});
+
+// Main loop
+try {
+  for await (const { prompt } of reader) {
+    // Run the agent and observe events
+    const response = await agent
+      .run(
+        { prompt },
+        {
+          execution: {
+            maxRetriesPerStep: 3,
+            totalMaxRetries: 10,
+            maxIterations: 20,
+          },
+        },
+      )
+      .observe((emitter) => {
+        // Show only final answers
+        emitter.on("update", async ({ update }) => {
+          if (update.key === "final_answer") {
+            reader.write("Agent 🤖 : ", update.value);
+          }
+        });
+
+        // Log errors
+        emitter.on("error", ({ error }) => {
+          reader.write("Agent 🤖 : ", FrameworkError.ensure(error).dump());
+        });
+
+        // Retry notifications
+        emitter.on("retry", () => {
+          reader.write("Agent 🤖 : ", "Retrying the action...");
+        });
+      });
+
+    // Print the final response
+    if (response.result?.text) {
+      reader.write("Agent 🤖 : ", response.result.text);
+    } else {
+      reader.write(
+        "Agent 🤖 : ",
+        "No result was returned. Ensure your input is valid or check tool configurations.",
+      );
+    }
+  }
+} catch (error) {
+  logger.error(FrameworkError.ensure(error).dump());
+} finally {
+  // Gracefully close the reader when exiting the app
+  reader.close();
+}
diff --git a/examples/helpers/io.ts b/examples/helpers/io.ts
index 06f1085a..296ce20f 100644
--- a/examples/helpers/io.ts
+++ b/examples/helpers/io.ts
@@ -27,16 +27,26 @@ export function createConsoleReader({
           .concat("\n"),
       );
     },
+
     async prompt(): Promise<string> {
+      // This uses the async iterator below. If it's exhausted, return empty string.
       for await (const { prompt } of this) {
         return prompt;
       }
-      process.exit(0);
+      return "";
+    },
+
+    // New method: asks a single question without consuming the async iterator.
+    async askSingleQuestion(queryMessage: string): Promise<string> {
+      const answer = await rl.question(R.piped(picocolors.cyan, picocolors.bold)(queryMessage));
+      return stripAnsi(answer.trim());
     },
+
     close() {
       stdin.pause();
       rl.close();
     },
+
     async *[Symbol.asyncIterator]() {
       if (!isActive) {
         return;
@@ -64,7 +74,7 @@
       }
       yield { prompt, iteration };
     }
-    } catch (e) {
+    } catch (e: any) {
      if (e.code === "ERR_USE_AFTER_CLOSE") {
        return;
      }
diff --git a/examples/tools/experimental/human.ts b/examples/tools/experimental/human.ts
new file mode 100644
index 00000000..42437e6c
--- /dev/null
+++ b/examples/tools/experimental/human.ts
@@ -0,0 +1,85 @@
+import { Emitter } from "bee-agent-framework/emitter/emitter";
+import {
+  Tool,
+  BaseToolRunOptions,
+  StringToolOutput,
+  ToolInput,
+  ToolEvents,
+} from "bee-agent-framework/tools/base";
+import { z } from "zod";
+import { createConsoleReader } from "../../helpers/io.js";
+
+export class HumanTool extends Tool<StringToolOutput> {
+  name = "HumanTool";
+  description = `
+  This tool is used whenever the user's input is unclear, ambiguous, or incomplete.
+  The agent MUST invoke this tool when additional clarification is required to proceed.
+  The output must adhere strictly to the following structure:
+    - Thought: A single-line description of the need for clarification.
+    - Function Name: HumanTool
+    - Function Input: { "message": "Your question to the user for clarification." }
+    - Function Output: The user's response in JSON format.
+  Examples:
+    - Example 1:
+      Input: "What is the weather?"
+      Thought: "The user's request lacks a location. I need to ask for clarification."
+      Function Name: HumanTool
+      Function Input: { "message": "Could you provide the location for which you would like to know the weather?" }
+      Function Output: { "clarification": "Santa Fe, Argentina" }
+      Final Answer: The current weather in Santa Fe, Argentina is 17.3°C with a relative humidity of 48% and a wind speed of 10.1 km/h.
+
+    - Example 2:
+      Input: "Can you help me?"
+      Thought: "The user's request is too vague. I need to ask for more details."
+      Function Name: HumanTool
+      Function Input: { "message": "Could you clarify what kind of help you need?" }
+      Function Output: { "clarification": "I need help understanding how to use the project management tool." }
+      Final Answer: Sure, I can help you with the project management tool. Let me know which feature you'd like to learn about or if you'd like a general overview.
+
+    - Example 3:
+      Input: "Translate this sentence."
+      Thought: "The user's request is incomplete. I need to ask for the sentence they want translated."
+      Function Name: HumanTool
+      Function Input: { "message": "Could you specify the sentence you would like me to translate?" }
+      Function Output: { "clarification": "Translate 'Hello, how are you?' to French." }
+      Final Answer: The French translation of 'Hello, how are you?' is 'Bonjour, comment vas-tu?'
+
+  Note: Do NOT attempt to guess or provide incomplete responses. Always use this tool when in doubt to ensure accurate and meaningful interactions.
+`;
+
+  public readonly emitter: Emitter<ToolEvents<ToolInput<this>, StringToolOutput>> =
+    Emitter.root.child({
+      namespace: ["tool", "human"],
+      creator: this,
+    });
+
+  private reader: ReturnType<typeof createConsoleReader>;
+
+  constructor(reader: ReturnType<typeof createConsoleReader>) {
+    super();
+    this.reader = reader;
+  }
+
+  inputSchema = () =>
+    z.object({
+      message: z.string().min(1, "Message cannot be empty"),
+    });
+
+  async _run(
+    input: ToolInput<this>,
+    _options: BaseToolRunOptions,
+  ): Promise<StringToolOutput> {
+    // Use the shared reader instance provided to the constructor
+    this.reader.write("HumanTool", input.message);
+
+    // Use askSingleQuestion instead of prompt to avoid interfering with the main loop iterator
+    const userInput = await this.reader.askSingleQuestion("User 👤 : ");
+
+    // Format the output as JSON; JSON.stringify escapes any quotes in the user's reply
+    const formattedOutput = JSON.stringify({
+      clarification: userInput.trim(),
+    });
+
+    return new StringToolOutput(formattedOutput);
+  }
+}
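For a quick manual check, the new tool can also be exercised on its own, outside the BeeAgent loop. The sketch below is illustrative only: it assumes the `HumanTool` and `createConsoleReader` exports introduced in this diff, the `tool.run(...)` / `getTextContent()` pattern used by the framework's custom-tool examples, and a hypothetical file path that is not part of this diff.

```ts
// examples/tools/experimental/human.quickcheck.ts (hypothetical path, not part of this diff)
import { createConsoleReader } from "../../helpers/io.js";
import { HumanTool } from "./human.js";

// Reuse a single console reader, exactly as the agent example does.
const reader = createConsoleReader();
const tool = new HumanTool(reader);

// Ask one clarification question and print the JSON string the agent would receive.
const output = await tool.run({ message: "Which city do you want the weather for?" });
reader.write("HumanTool", output.getTextContent());

reader.close();
```

Because the tool uses `askSingleQuestion` rather than `prompt`, running it this way does not consume the reader's async iterator, so the same reader can still drive the interactive loop in `examples/agents/experimental/human.ts`.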