diff --git a/autogen/agentchat/contrib/retrieve_user_proxy_agent.py b/autogen/agentchat/contrib/retrieve_user_proxy_agent.py
index 4842bd4e9f53..90757af6fc3e 100644
--- a/autogen/agentchat/contrib/retrieve_user_proxy_agent.py
+++ b/autogen/agentchat/contrib/retrieve_user_proxy_agent.py
@@ -519,7 +519,7 @@ def _generate_retrieve_user_reply(
self.problem, self.n_results * (2 * _tmp_retrieve_count + 1), self._search_string
)
doc_contents = self._get_context(self._results)
- if doc_contents:
+ if doc_contents or self.n_results * (2 * _tmp_retrieve_count + 1) >= len(self._results[0]):
break
elif update_context_case2:
# Use the current intermediate info as the query text to retrieve docs, and each time we append the top similar
@@ -531,7 +531,7 @@ def _generate_retrieve_user_reply(
)
self._get_context(self._results)
doc_contents = "\n".join(self._doc_contents) # + "\n" + "\n".join(self._intermediate_answers)
- if doc_contents:
+ if doc_contents or self.n_results * (2 * _tmp_retrieve_count + 1) >= len(self._results[0]):
break
self.clear_history()
diff --git a/autogen/agentchat/contrib/society_of_mind_agent.py b/autogen/agentchat/contrib/society_of_mind_agent.py
index 2f6be5088a4d..e76768187c9f 100644
--- a/autogen/agentchat/contrib/society_of_mind_agent.py
+++ b/autogen/agentchat/contrib/society_of_mind_agent.py
@@ -39,6 +39,7 @@ def __init__(
code_execution_config: Union[Dict, Literal[False]] = False,
llm_config: Optional[Union[Dict, Literal[False]]] = False,
default_auto_reply: Optional[Union[str, Dict, None]] = "",
+ **kwargs,
):
super().__init__(
name=name,
@@ -50,6 +51,7 @@ def __init__(
code_execution_config=code_execution_config,
llm_config=llm_config,
default_auto_reply=default_auto_reply,
+ **kwargs,
)
self.update_chat_manager(chat_manager)
diff --git a/autogen/agentchat/contrib/web_surfer.py b/autogen/agentchat/contrib/web_surfer.py
index af07be6d3432..f74915a9b403 100644
--- a/autogen/agentchat/contrib/web_surfer.py
+++ b/autogen/agentchat/contrib/web_surfer.py
@@ -41,6 +41,7 @@ def __init__(
summarizer_llm_config: Optional[Union[Dict, Literal[False]]] = None,
default_auto_reply: Optional[Union[str, Dict, None]] = "",
browser_config: Optional[Union[Dict, None]] = None,
+ **kwargs,
):
super().__init__(
name=name,
@@ -53,6 +54,7 @@ def __init__(
code_execution_config=code_execution_config,
llm_config=llm_config,
default_auto_reply=default_auto_reply,
+ **kwargs,
)
self._create_summarizer_client(summarizer_llm_config, llm_config)
diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py
index 2d8958f3282d..a088c491082e 100644
--- a/autogen/agentchat/conversable_agent.py
+++ b/autogen/agentchat/conversable_agent.py
@@ -78,6 +78,7 @@ def __init__(
default_auto_reply: Union[str, Dict] = "",
description: Optional[str] = None,
chat_messages: Optional[Dict[Agent, List[Dict]]] = None,
+ silent: Optional[bool] = None,
):
"""
Args:
@@ -126,6 +127,8 @@ def __init__(
chat_messages (dict or None): the previous chat messages that this agent had in the past with other agents.
Can be used to give the agent a memory by providing the chat history. This will allow the agent to
resume previous had conversations. Defaults to an empty chat history.
+ silent (bool or None): (Experimental) whether to print the message sent. If None, will use the value of
+ silent in each function.
"""
# we change code_execution_config below and we have to make sure we don't change the input
# in case of UserProxyAgent, without this we could even change the default value {}
@@ -147,6 +150,7 @@ def __init__(
if is_termination_msg is not None
else (lambda x: content_str(x.get("content")) == "TERMINATE")
)
+ self.silent = silent
# Take a copy to avoid modifying the given dict
if isinstance(llm_config, dict):
try:
@@ -263,6 +267,10 @@ def _validate_llm_config(self, llm_config):
)
self.client = None if self.llm_config is False else OpenAIWrapper(**self.llm_config)
+ @staticmethod
+ def _is_silent(agent: Agent, silent: Optional[bool] = False) -> bool:
+ return agent.silent if agent.silent is not None else silent
+
@property
def name(self) -> str:
"""Get the name of the agent."""
@@ -606,7 +614,9 @@ def _process_message_before_send(
"""Process the message before sending it to the recipient."""
hook_list = self.hook_lists["process_message_before_send"]
for hook in hook_list:
- message = hook(sender=self, message=message, recipient=recipient, silent=silent)
+ message = hook(
+ sender=self, message=message, recipient=recipient, silent=ConversableAgent._is_silent(self, silent)
+ )
return message
def send(
@@ -648,7 +658,7 @@ def send(
Raises:
ValueError: if the message can't be converted into a valid ChatCompletion message.
"""
- message = self._process_message_before_send(message, recipient, silent)
+ message = self._process_message_before_send(message, recipient, ConversableAgent._is_silent(self, silent))
# When the agent composes and sends the message, the role of the message is "assistant"
# unless it's "function".
valid = self._append_oai_message(message, "assistant", recipient)
@@ -698,7 +708,7 @@ async def a_send(
Raises:
ValueError: if the message can't be converted into a valid ChatCompletion message.
"""
- message = self._process_message_before_send(message, recipient, silent)
+ message = self._process_message_before_send(message, recipient, ConversableAgent._is_silent(self, silent))
# When the agent composes and sends the message, the role of the message is "assistant"
# unless it's "function".
valid = self._append_oai_message(message, "assistant", recipient)
@@ -780,7 +790,8 @@ def _process_received_message(self, message: Union[Dict, str], sender: Agent, si
raise ValueError(
"Received message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided."
)
- if not silent:
+
+ if not ConversableAgent._is_silent(sender, silent):
self._print_received_message(message, sender)
def receive(
@@ -2185,7 +2196,7 @@ def _format_json_str(jstr):
Ex 2:
"{\n \"location\": \"Boston, MA\"\n}" -> "{"location": "Boston, MA"}"
- 2. this function also handles JSON escape sequences inside quotes,
+ 2. this function also handles JSON escape sequences inside quotes.
Ex 1:
'{"args": "a\na\na\ta"}' -> '{"args": "a\\na\\na\\ta"}'
"""
diff --git a/autogen/agentchat/user_proxy_agent.py b/autogen/agentchat/user_proxy_agent.py
index a80296a8355a..d50e4d8b89c5 100644
--- a/autogen/agentchat/user_proxy_agent.py
+++ b/autogen/agentchat/user_proxy_agent.py
@@ -35,6 +35,7 @@ def __init__(
llm_config: Optional[Union[Dict, Literal[False]]] = False,
system_message: Optional[Union[str, List]] = "",
description: Optional[str] = None,
+ **kwargs,
):
"""
Args:
@@ -79,6 +80,8 @@ def __init__(
Only used when llm_config is not False. Use it to reprogram the agent.
description (str): a short description of the agent. This description is used by other agents
(e.g. the GroupChatManager) to decide when to call upon this agent. (Default: system_message)
+ **kwargs (dict): Please refer to other kwargs in
+ [ConversableAgent](conversable_agent#__init__).
"""
super().__init__(
name=name,
@@ -93,6 +96,7 @@ def __init__(
description=(
description if description is not None else self.DEFAULT_USER_PROXY_AGENT_DESCRIPTIONS[human_input_mode]
),
+ **kwargs,
)
if logging_enabled():
diff --git a/autogen/coding/base.py b/autogen/coding/base.py
index ccbfe6b92932..7c9e19d73f33 100644
--- a/autogen/coding/base.py
+++ b/autogen/coding/base.py
@@ -4,7 +4,6 @@
from pydantic import BaseModel, Field
-from ..agentchat.agent import LLMAgent
from ..types import UserMessageImageContentPart, UserMessageTextContentPart
__all__ = ("CodeBlock", "CodeResult", "CodeExtractor", "CodeExecutor", "CodeExecutionConfig")
diff --git a/autogen/oai/cohere.py b/autogen/oai/cohere.py
index e04d07327203..35b7ac97c4f3 100644
--- a/autogen/oai/cohere.py
+++ b/autogen/oai/cohere.py
@@ -415,8 +415,9 @@ def oai_messages_to_cohere_messages(
# If we're adding tool_results, like we are, the last message can't be a USER message
# So, we add a CHATBOT 'continue' message, if so.
+ # Changed key from "content" to "message" (jaygdesai/autogen_Jay)
if cohere_messages[-1]["role"] == "USER":
- cohere_messages.append({"role": "CHATBOT", "content": "Please continue."})
+ cohere_messages.append({"role": "CHATBOT", "message": "Please continue."})
# We return a blank message when we have tool results
# TODO: Check what happens if tool_results aren't the latest message
diff --git a/dotnet/eng/MetaInfo.props b/dotnet/eng/MetaInfo.props
index f43a47c8ce27..72918fabe4f4 100644
--- a/dotnet/eng/MetaInfo.props
+++ b/dotnet/eng/MetaInfo.props
@@ -1,7 +1,7 @@
- 0.0.16
+ 0.0.17
AutoGen
https://microsoft.github.io/autogen-for-net/
https://github.com/microsoft/autogen
diff --git a/dotnet/src/AutoGen.Core/Extension/GroupChatExtension.cs b/dotnet/src/AutoGen.Core/Extension/GroupChatExtension.cs
index a5009e211556..6b17a2b93fd6 100644
--- a/dotnet/src/AutoGen.Core/Extension/GroupChatExtension.cs
+++ b/dotnet/src/AutoGen.Core/Extension/GroupChatExtension.cs
@@ -51,7 +51,10 @@ public static async IAsyncEnumerable SendAsync(
yield break;
}
- chatHistory = messages;
+ // messages will contain the complete chat history, including the initialize messages,
+ // but we only need to add the last message to the chat history
+ // fix #3268
+ chatHistory = chatHistory.Append(lastMessage);
}
}
diff --git a/dotnet/src/AutoGen.DotnetInteractive/AutoGen.DotnetInteractive.csproj b/dotnet/src/AutoGen.DotnetInteractive/AutoGen.DotnetInteractive.csproj
index 96b331f2df3b..5778761f05da 100644
--- a/dotnet/src/AutoGen.DotnetInteractive/AutoGen.DotnetInteractive.csproj
+++ b/dotnet/src/AutoGen.DotnetInteractive/AutoGen.DotnetInteractive.csproj
@@ -27,12 +27,9 @@
-
-
-
-
+
diff --git a/dotnet/src/AutoGen.DotnetInteractive/DotnetInteractiveFunction.cs b/dotnet/src/AutoGen.DotnetInteractive/DotnetInteractiveFunction.cs
index bb5504cd5487..c9b59203462b 100644
--- a/dotnet/src/AutoGen.DotnetInteractive/DotnetInteractiveFunction.cs
+++ b/dotnet/src/AutoGen.DotnetInteractive/DotnetInteractiveFunction.cs
@@ -2,14 +2,12 @@
// DotnetInteractiveFunction.cs
using System.Text;
-using System.Text.Json;
-using Azure.AI.OpenAI;
using Microsoft.DotNet.Interactive.Documents;
using Microsoft.DotNet.Interactive.Documents.Jupyter;
namespace AutoGen.DotnetInteractive;
-public class DotnetInteractiveFunction : IDisposable
+public partial class DotnetInteractiveFunction : IDisposable
{
private readonly InteractiveService? _interactiveService = null;
private string _notebookPath;
@@ -71,6 +69,7 @@ public DotnetInteractiveFunction(InteractiveService interactiveService, string?
/// Run existing dotnet code from message. Don't modify the code, run it as is.
///
/// code.
+ [Function]
public async Task RunCode(string code)
{
if (this._interactiveService == null)
@@ -117,6 +116,7 @@ public async Task RunCode(string code)
/// Install nuget packages.
///
/// nuget package to install.
+ [Function]
public async Task InstallNugetPackages(string[] nugetPackages)
{
if (this._interactiveService == null)
@@ -173,105 +173,6 @@ private async Task AddCellAsync(string cellContent, string kernelName)
writeStream.Dispose();
}
- private class RunCodeSchema
- {
- public string code { get; set; } = string.Empty;
- }
-
- public Task RunCodeWrapper(string arguments)
- {
- var schema = JsonSerializer.Deserialize(
- arguments,
- new JsonSerializerOptions
- {
- PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
- });
-
- return RunCode(schema!.code);
- }
-
- public FunctionDefinition RunCodeFunction
- {
- get => new FunctionDefinition
- {
- Name = @"RunCode",
- Description = """
-Run existing dotnet code from message. Don't modify the code, run it as is.
-""",
- Parameters = BinaryData.FromObjectAsJson(new
- {
- Type = "object",
- Properties = new
- {
- code = new
- {
- Type = @"string",
- Description = @"code.",
- },
- },
- Required = new[]
- {
- "code",
- },
- },
- new JsonSerializerOptions
- {
- PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
- })
- };
- }
-
- private class InstallNugetPackagesSchema
- {
- public string[] nugetPackages { get; set; } = Array.Empty();
- }
-
- public Task InstallNugetPackagesWrapper(string arguments)
- {
- var schema = JsonSerializer.Deserialize(
- arguments,
- new JsonSerializerOptions
- {
- PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
- });
-
- return InstallNugetPackages(schema!.nugetPackages);
- }
-
- public FunctionDefinition InstallNugetPackagesFunction
- {
- get => new FunctionDefinition
- {
- Name = @"InstallNugetPackages",
- Description = """
-Install nuget packages.
-""",
- Parameters = BinaryData.FromObjectAsJson(new
- {
- Type = "object",
- Properties = new
- {
- nugetPackages = new
- {
- Type = @"array",
- Items = new
- {
- Type = @"string",
- },
- Description = @"nuget package to install.",
- },
- },
- Required = new[]
- {
- "nugetPackages",
- },
- },
- new JsonSerializerOptions
- {
- PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
- })
- };
- }
public void Dispose()
{
this._interactiveService?.Dispose();
diff --git a/dotnet/src/AutoGen.SourceGenerator/Template/FunctionCallTemplate.cs b/dotnet/src/AutoGen.SourceGenerator/Template/FunctionCallTemplate.cs
index 8eeb117141d8..b90d78be3f19 100644
--- a/dotnet/src/AutoGen.SourceGenerator/Template/FunctionCallTemplate.cs
+++ b/dotnet/src/AutoGen.SourceGenerator/Template/FunctionCallTemplate.cs
@@ -36,7 +36,6 @@ public virtual string TransformText()
using System.Threading.Tasks;
using System;
using AutoGen.Core;
-using AutoGen.OpenAI.Extension;
");
if (!String.IsNullOrEmpty(NameSpace)) {
diff --git a/dotnet/src/AutoGen.SourceGenerator/Template/FunctionCallTemplate.tt b/dotnet/src/AutoGen.SourceGenerator/Template/FunctionCallTemplate.tt
index dc41f0af9d70..e7ed476fde8b 100644
--- a/dotnet/src/AutoGen.SourceGenerator/Template/FunctionCallTemplate.tt
+++ b/dotnet/src/AutoGen.SourceGenerator/Template/FunctionCallTemplate.tt
@@ -13,7 +13,6 @@ using System.Text.Json.Serialization;
using System.Threading.Tasks;
using System;
using AutoGen.Core;
-using AutoGen.OpenAI.Extension;
<#if (!String.IsNullOrEmpty(NameSpace)) {#>
namespace <#=NameSpace#>
diff --git a/dotnet/test/AutoGen.SourceGenerator.Tests/ApprovalTests/FunctionCallTemplateTests.TestFunctionCallTemplate.approved.txt b/dotnet/test/AutoGen.SourceGenerator.Tests/ApprovalTests/FunctionCallTemplateTests.TestFunctionCallTemplate.approved.txt
index f223d3124ddd..ea5a8585cc2f 100644
--- a/dotnet/test/AutoGen.SourceGenerator.Tests/ApprovalTests/FunctionCallTemplateTests.TestFunctionCallTemplate.approved.txt
+++ b/dotnet/test/AutoGen.SourceGenerator.Tests/ApprovalTests/FunctionCallTemplateTests.TestFunctionCallTemplate.approved.txt
@@ -8,7 +8,6 @@ using System.Text.Json.Serialization;
using System.Threading.Tasks;
using System;
using AutoGen.Core;
-using AutoGen.OpenAI.Extension;
namespace AutoGen.SourceGenerator.Tests
{
diff --git a/dotnet/test/AutoGen.Tests/GroupChat/GroupChatTests.cs b/dotnet/test/AutoGen.Tests/GroupChat/GroupChatTests.cs
index 7a7a27be9b10..19ca02ae92fa 100644
--- a/dotnet/test/AutoGen.Tests/GroupChat/GroupChatTests.cs
+++ b/dotnet/test/AutoGen.Tests/GroupChat/GroupChatTests.cs
@@ -4,8 +4,10 @@
using System;
using System.Collections.Generic;
using System.Linq;
+using System.Threading;
using System.Threading.Tasks;
using FluentAssertions;
+using Moq;
using Xunit;
namespace AutoGen.Tests;
@@ -51,4 +53,36 @@ public async Task ItTerminateConversationWhenAgentReturnTerminateKeyWord()
chatHistory.Count().Should().Be(3);
chatHistory.Last().From.Should().Be("Cathy");
}
+
+ [Fact]
+ public async Task ItSendAsyncDoesntAddDuplicateInitializeMessagesTest()
+ {
+ // fix #3268
+ var alice = new DefaultReplyAgent("Alice", "I am alice");
+ var bob = new DefaultReplyAgent("Bob", "I am bob");
+ var cathy = new DefaultReplyAgent("Cathy", $"I am cathy, {GroupChatExtension.TERMINATE}");
+
+ var roundRobinOrchestrator = new RoundRobinOrchestrator();
+ var orchestrator = Mock.Of();
+ Mock.Get(orchestrator).Setup(x => x.GetNextSpeakerAsync(It.IsAny(), It.IsAny()))
+ .Returns((OrchestrationContext context, CancellationToken token) =>
+ {
+ // verify the initialize message appears exactly once (i.e. it is not added twice)
+ context.ChatHistory.Where(x => x.From == alice.Name).Count().Should().Be(1);
+
+ return roundRobinOrchestrator.GetNextSpeakerAsync(context, token);
+ });
+
+ var groupChat = new GroupChat([alice, bob, cathy], orchestrator);
+ groupChat.AddInitializeMessage(new TextMessage(Role.User, "Hello", from: alice.Name));
+
+ var maxRound = 2;
+ var chatHistory = new List();
+ await foreach (var message in groupChat.SendAsync(chatHistory, maxRound))
+ {
+ chatHistory.Add(message);
+ }
+
+ chatHistory.Count().Should().Be(2);
+ }
}
diff --git a/dotnet/website/release_note/0.0.17.md b/dotnet/website/release_note/0.0.17.md
new file mode 100644
index 000000000000..ad245191e7d0
--- /dev/null
+++ b/dotnet/website/release_note/0.0.17.md
@@ -0,0 +1,45 @@
+# AutoGen.Net 0.0.17 Release Notes
+
+## π What's New
+
+1. **.NET Core Target Framework Support** ([#3203](https://github.com/microsoft/autogen/issues/3203))
+ - π Added support for .NET Core to ensure compatibility and enhanced performance of AutoGen packages across different platforms.
+
+2. **Kernel Support in Interactive Service Constructor** ([#3181](https://github.com/microsoft/autogen/issues/3181))
+ - π§ Enhanced the Interactive Service to accept a kernel in its constructor, facilitating usage in notebook environments.
+
+3. **Constructor Options for OpenAIChatAgent** ([#3126](https://github.com/microsoft/autogen/issues/3126))
+ - βοΈ Added new constructor options for `OpenAIChatAgent` to allow full control over chat completion flags/options.
+
+4. **Step-by-Step Execution for Group Chat** ([#3075](https://github.com/microsoft/autogen/issues/3075))
+ - π οΈ Introduced an `IAsyncEnumerable` extension API to run group chat step-by-step, enabling developers to observe internal processes or implement early stopping mechanisms.
+
+## π Improvements
+
+1. **Cancellation Token Addition in Graph APIs** ([#3111](https://github.com/microsoft/autogen/issues/3111))
+ - π Added cancellation tokens to async APIs in the `AutoGen.Core.Graph` class to follow best practices and enhance the control flow.
+
+## β οΈ API Breaking Changes
+
+1. **FunctionDefinition Generation Stopped in Source Generator** ([#3133](https://github.com/microsoft/autogen/issues/3133))
+ - π Stopped generating `FunctionDefinition` from `Azure.AI.OpenAI` in the source generator to eliminate unnecessary package dependencies. Migration guide:
+ - β‘οΈ Use `ToOpenAIFunctionDefinition()` extension from `AutoGen.OpenAI` for generating `FunctionDefinition` from `AutoGen.Core.FunctionContract`.
+ - β‘οΈ Use `FunctionContract` for metadata such as function name or parameters.
+
+2. **Namespace Renaming for AutoGen.WebAPI** ([#3152](https://github.com/microsoft/autogen/issues/3152))
+ - βοΈ Renamed the namespace of `AutoGen.WebAPI` from `AutoGen.Service` to `AutoGen.WebAPI` to maintain consistency with the project name.
+
+3. **Semantic Kernel Version Update** ([#3118](https://github.com/microsoft/autogen/issues/3118))
+ - π Upgraded the Semantic Kernel version to 1.15.1 for enhanced functionality and performance improvements. This might introduce breaking changes for those who use a lower version of Semantic Kernel.
+
+## π Documentation
+
+1. **Consume AutoGen.Net Agent in AG Studio** ([#3142](https://github.com/microsoft/autogen/issues/3142))
+ - Added detailed documentation on using AutoGen.Net Agent as a model in AG Studio, including examples of starting an OpenAI chat backend and integrating third-party OpenAI models.
+
+2. **Middleware Overview Documentation Errors Fixed** ([#3129](https://github.com/microsoft/autogen/issues/3129))
+ - Corrected logic and compile errors in the example code provided in the Middleware Overview documentation to ensure it runs without issues.
+
+---
+
+We hope you enjoy the new features and improvements in AutoGen.Net 0.0.17! If you encounter any issues or have feedback, please open a new issue on our [GitHub repository](https://github.com/microsoft/autogen/issues).
\ No newline at end of file
diff --git a/dotnet/website/release_note/toc.yml b/dotnet/website/release_note/toc.yml
index d3b8559a9a32..f8753cacc890 100644
--- a/dotnet/website/release_note/toc.yml
+++ b/dotnet/website/release_note/toc.yml
@@ -1,3 +1,6 @@
+- name: 0.0.17
+ href: 0.0.17.md
+
- name: 0.0.16
href: 0.0.16.md
diff --git a/notebook/agentchat_RetrieveChat.ipynb b/notebook/agentchat_RetrieveChat.ipynb
index adb13ac47bd5..6fefcd3ba44c 100644
--- a/notebook/agentchat_RetrieveChat.ipynb
+++ b/notebook/agentchat_RetrieveChat.ipynb
@@ -48,14 +48,14 @@
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
- "models to use: ['gpt-3.5-turbo-0125']\n"
+ "models to use: ['gpt-35-turbo', 'gpt4-1106-preview', 'gpt-4o']\n"
]
}
],
@@ -73,9 +73,7 @@
"# a vector database instance\n",
"from autogen.retrieve_utils import TEXT_FORMATS\n",
"\n",
- "config_list = [\n",
- " {\"model\": \"gpt-3.5-turbo-0125\", \"api_key\": \"\", \"api_type\": \"openai\"},\n",
- "]\n",
+ "config_list = autogen.config_list_from_json(\"OAI_CONFIG_LIST\")\n",
"\n",
"assert len(config_list) > 0\n",
"print(\"models to use: \", [config_list[i][\"model\"] for i in range(len(config_list))])"
@@ -107,7 +105,7 @@
"output_type": "stream",
"text": [
"Accepted file formats for `docs_path`:\n",
- "['odt', 'xml', 'pdf', 'docx', 'html', 'md', 'htm', 'csv', 'rst', 'org', 'ppt', 'doc', 'log', 'json', 'epub', 'jsonl', 'pptx', 'yml', 'xlsx', 'tsv', 'txt', 'yaml', 'msg', 'rtf']\n"
+ "['txt', 'json', 'csv', 'tsv', 'md', 'html', 'htm', 'rtf', 'rst', 'jsonl', 'log', 'xml', 'yaml', 'yml', 'pdf']\n"
]
}
],
@@ -120,7 +118,16 @@
"cell_type": "code",
"execution_count": 3,
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/workspace/anaconda3/envs/autogen312/lib/python3.12/site-packages/sentence_transformers/cross_encoder/CrossEncoder.py:11: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n",
+ " from tqdm.autonotebook import tqdm, trange\n"
+ ]
+ }
+ ],
"source": [
"# 1. create an RetrieveAssistantAgent instance named \"assistant\"\n",
"assistant = RetrieveAssistantAgent(\n",
@@ -160,6 +167,7 @@
" # \"client\": chromadb.PersistentClient(path=\"/tmp/chromadb\"), # deprecated, use \"vector_db\" instead\n",
" \"vector_db\": \"chroma\", # to use the deprecated `client` parameter, set to None and uncomment the line above\n",
" \"overwrite\": False, # set to True if you want to overwrite an existing collection\n",
+ " \"get_or_create\": True, # set to False if don't want to reuse an existing collection\n",
" },\n",
" code_execution_config=False, # set to False if you don't want to execute the code\n",
")"
@@ -188,7 +196,8 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "2024-04-07 17:30:56,955 - autogen.agentchat.contrib.retrieve_user_proxy_agent - INFO - \u001b[32mUse the existing collection `autogen-docs`.\u001b[0m\n"
+ "2024-08-02 06:30:11,303 - autogen.agentchat.contrib.retrieve_user_proxy_agent - INFO - \u001b[32mUse the existing collection `autogen-docs`.\u001b[0m\n",
+ "2024-08-02 06:30:11,485 - autogen.agentchat.contrib.retrieve_user_proxy_agent - INFO - Found 2 chunks.\u001b[0m\n"
]
},
{
@@ -202,7 +211,6 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "2024-04-07 17:30:59,609 - autogen.agentchat.contrib.retrieve_user_proxy_agent - INFO - Found 2 chunks.\u001b[0m\n",
"Number of requested results 20 is greater than number of elements in index 2, updating n_results = 2\n"
]
},
@@ -361,65 +369,53 @@
"--------------------------------------------------------------------------------\n",
"\u001b[33massistant\u001b[0m (to ragproxyagent):\n",
"\n",
- "To perform a classification task using FLAML and use Spark to do parallel training for 30 seconds and force cancel jobs if the time limit is reached, you can follow these steps:\n",
- "\n",
- "1. First, convert your data into Spark dataframe format using `to_pandas_on_spark` function from `flaml.automl.spark.utils` module.\n",
- "2. Then, format your data for use SparkML models by using `VectorAssembler`.\n",
- "3. Define your AutoML settings, including the `metric`, `time_budget`, and `task`.\n",
- "4. Use `AutoML` from `flaml` to run AutoML with SparkML models by setting `use_spark` to `true`, and `estimator_list` to a list of spark-based estimators, like `[\"lgbm_spark\"]`.\n",
- "5. Set `n_concurrent_trials` to the desired number of parallel jobs and `force_cancel` to `True` to cancel the jobs if the time limit is reached.\n",
- "\n",
- "Here's an example code snippet for performing classification using FLAML and Spark:\n",
- "\n",
"```python\n",
- "import pandas as pd\n",
+ "import flaml\n",
"from flaml.automl.spark.utils import to_pandas_on_spark\n",
"from pyspark.ml.feature import VectorAssembler\n",
- "import flaml\n",
+ "import pandas as pd\n",
"\n",
- "# Creating a dictionary\n",
+ "# Example Data (Please provide real data in practice)\n",
"data = {\n",
- " \"sepal_length\": [5.1, 4.9, 4.7, 4.6, 5.0],\n",
- " \"sepal_width\": [3.5, 3.0, 3.2, 3.1, 3.6],\n",
- " \"petal_length\": [1.4, 1.4, 1.3, 1.5, 1.4],\n",
- " \"petal_width\": [0.2, 0.2, 0.2, 0.2, 0.2],\n",
- " \"species\": [\"setosa\", \"setosa\", \"setosa\", \"setosa\", \"setosa\"]\n",
+ " \"feature1\": [0, 1, 2, 3, 4],\n",
+ " \"feature2\": [1, 2, 3, 4, 5],\n",
+ " # ... add all features you need for your classification\n",
+ " \"label\": ['a', 'b', 'a', 'a', 'b'], # assuming binary classification with labels 'a' and 'b'\n",
"}\n",
"\n",
- "# Creating a pandas DataFrame\n",
- "dataframe = pd.DataFrame(data)\n",
- "label = \"species\"\n",
+ "# Convert to Pandas DataFrame\n",
+ "pdf = pd.DataFrame(data)\n",
"\n",
- "# Convert to pandas-on-spark dataframe\n",
- "psdf = to_pandas_on_spark(dataframe)\n",
+ "# Generate pandas-on-spark dataframe\n",
+ "psdf = to_pandas_on_spark(pdf)\n",
"\n",
- "# Format data for SparkML models\n",
- "columns = psdf.columns\n",
- "feature_cols = [col for col in columns if col != label]\n",
+ "# Organize data into feature vectors and labels\n",
+ "label_col = \"label\"\n",
+ "feature_cols = [col for col in psdf.columns if col != label_col]\n",
"featurizer = VectorAssembler(inputCols=feature_cols, outputCol=\"features\")\n",
- "psdf = featurizer.transform(psdf.to_spark(index_col=\"index\"))[\"index\", \"features\"]\n",
"\n",
- "# Define AutoML settings\n",
- "settings = {\n",
+ "# Apply the transformation\n",
+ "psdf = featurizer.transform(psdf.to_spark(index_col=\"index\"))[\"index\", \"features\", label_col]\n",
+ "\n",
+ "# Prepare AutoML settings\n",
+ "automl_settings = {\n",
" \"time_budget\": 30,\n",
- " \"metric\": \"accuracy\",\n",
+ " \"metric\": \"accuracy\", # Change this to a classification metric you prefer\n",
" \"task\": \"classification\",\n",
+ " \"n_concurrent_trials\": 2, # Or other number that fits your Spark cluster configuration\n",
+ " \"use_spark\": True,\n",
+ " \"force_cancel\": True, # Enable force cancel to obey the time constraint\n",
+ " \"estimator_list\": [\"lgbm_spark\"], # Specify SparkML estimators you want to try\n",
"}\n",
"\n",
- "# Use AutoML with SparkML models and parallel jobs\n",
+ "# Create an AutoML instance\n",
"automl = flaml.AutoML()\n",
- "automl.fit(\n",
- " dataframe=psdf,\n",
- " label=label,\n",
- " estimator_list=[\"lgbm_spark\"],\n",
- " use_spark=True,\n",
- " n_concurrent_trials=2,\n",
- " force_cancel=True,\n",
- " **settings,\n",
- ")\n",
- "```\n",
"\n",
- "Note that the above code assumes the data is small enough to train within 30 seconds. If you have a larger dataset, you may need to increase the `time_budget` and adjust the number of parallel jobs accordingly.\n",
+ "# Run the AutoML search\n",
+ "automl.fit(dataframe=psdf, label=label_col, **automl_settings)\n",
+ "``` \n",
+ "\n",
+ "Remember to replace the example data with your real dataset and choose an appropriate metric for your classification task. You'll also need a configured and running Spark environment to utilize the \"use_spark\" feature.\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33mragproxyagent\u001b[0m (to assistant):\n",
@@ -439,25 +435,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "Number of requested results 60 is greater than number of elements in index 2, updating n_results = 2\n",
- "Number of requested results 100 is greater than number of elements in index 2, updating n_results = 2\n",
- "Number of requested results 140 is greater than number of elements in index 2, updating n_results = 2\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "VectorDB returns doc_ids: [['bdfbc921']]\n",
- "VectorDB returns doc_ids: [['bdfbc921']]\n",
- "VectorDB returns doc_ids: [['bdfbc921']]\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Number of requested results 180 is greater than number of elements in index 2, updating n_results = 2\n"
+ "Number of requested results 60 is greater than number of elements in index 2, updating n_results = 2\n"
]
},
{
@@ -470,18 +448,13 @@
"\n",
"TERMINATE\n",
"\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[33mragproxyagent\u001b[0m (to assistant):\n",
+ "\n",
+ "TERMINATE\n",
+ "\n",
"--------------------------------------------------------------------------------\n"
]
- },
- {
- "data": {
- "text/plain": [
- "ChatResult(chat_id=None, chat_history=[{'content': 'TERMINATE', 'role': 'assistant'}], summary='', cost=({'total_cost': 0.007691, 'gpt-35-turbo': {'cost': 0.007691, 'prompt_tokens': 4242, 'completion_tokens': 664, 'total_tokens': 4906}}, {'total_cost': 0}), human_input=[])"
- ]
- },
- "execution_count": 4,
- "metadata": {},
- "output_type": "execute_result"
}
],
"source": [
@@ -2836,7 +2809,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.11.9"
+ "version": "3.12.4"
},
"skip_test": "Requires interactive usage"
},
diff --git a/samples/apps/cap/py/autogencap/DirectorySvc.py b/samples/apps/cap/py/autogencap/DirectorySvc.py
index acb3b6223df5..6057558c0b24 100644
--- a/samples/apps/cap/py/autogencap/DirectorySvc.py
+++ b/samples/apps/cap/py/autogencap/DirectorySvc.py
@@ -8,7 +8,7 @@
from autogencap.ActorConnector import ActorConnector, ActorSender
from autogencap.Broker import Broker
from autogencap.Config import router_url, xpub_url, xsub_url
-from autogencap.Constants import Directory_Svc_Topic
+from autogencap.constants import Directory_Svc_Topic
from autogencap.DebugLog import Debug, Error, Info
from autogencap.proto.CAP_pb2 import (
ActorInfo,
diff --git a/samples/apps/cap/py/autogencap/actor_runtime.py b/samples/apps/cap/py/autogencap/actor_runtime.py
new file mode 100644
index 000000000000..027b20905877
--- /dev/null
+++ b/samples/apps/cap/py/autogencap/actor_runtime.py
@@ -0,0 +1,36 @@
+from abc import ABC, abstractmethod
+from typing import List
+
+from .Actor import Actor
+from .ActorConnector import ActorConnector
+from .proto.CAP_pb2 import ActorInfo
+
+
+class IRuntime(ABC):
+ @abstractmethod
+ def register(self, actor: Actor):
+ pass
+
+ @abstractmethod
+ def connect(self):
+ pass
+
+ @abstractmethod
+ def disconnect(self):
+ pass
+
+ @abstractmethod
+ def find_by_topic(self, topic: str) -> ActorConnector:
+ pass
+
+ @abstractmethod
+ def find_by_name(self, name: str) -> ActorConnector:
+ pass
+
+ @abstractmethod
+ def find_termination(self) -> ActorConnector:
+ pass
+
+ @abstractmethod
+ def find_by_name_regex(self, name_regex) -> List[ActorInfo]:
+ pass
diff --git a/samples/apps/cap/py/autogencap/ag_adapter/AG2CAP.py b/samples/apps/cap/py/autogencap/ag_adapter/AG2CAP.py
index 1854d219e7d1..4b5f79aefd85 100644
--- a/samples/apps/cap/py/autogencap/ag_adapter/AG2CAP.py
+++ b/samples/apps/cap/py/autogencap/ag_adapter/AG2CAP.py
@@ -3,7 +3,7 @@
from autogen import Agent, ConversableAgent
-from ..ComponentEnsemble import ComponentEnsemble
+from ..actor_runtime import IRuntime
from .AutoGenConnector import AutoGenConnector
@@ -14,13 +14,13 @@ class AG2CAP(ConversableAgent):
def __init__(
self,
- ensemble: ComponentEnsemble,
+ ensemble: IRuntime,
agent_name: str,
agent_description: Optional[str] = None,
):
super().__init__(name=agent_name, description=agent_description, llm_config=False)
self._agent_connector: AutoGenConnector = None
- self._ensemble: ComponentEnsemble = ensemble
+ self._ensemble: IRuntime = ensemble
self._recv_called = False
def reset_receive_called(self):
diff --git a/samples/apps/cap/py/autogencap/ag_adapter/AGActor.py b/samples/apps/cap/py/autogencap/ag_adapter/AGActor.py
index cc301f38e7e7..6bd804e33e9e 100644
--- a/samples/apps/cap/py/autogencap/ag_adapter/AGActor.py
+++ b/samples/apps/cap/py/autogencap/ag_adapter/AGActor.py
@@ -1,7 +1,7 @@
import zmq
from autogencap.Actor import Actor
-from autogencap.Constants import Termination_Topic
+from autogencap.constants import Termination_Topic
from autogencap.DebugLog import Debug
diff --git a/samples/apps/cap/py/autogencap/ag_adapter/CAP2AG.py b/samples/apps/cap/py/autogencap/ag_adapter/CAP2AG.py
index 789ebd9bf4b8..0dd3b7be5ced 100644
--- a/samples/apps/cap/py/autogencap/ag_adapter/CAP2AG.py
+++ b/samples/apps/cap/py/autogencap/ag_adapter/CAP2AG.py
@@ -4,7 +4,7 @@
from autogen import ConversableAgent
-from ..ComponentEnsemble import ComponentEnsemble
+from ..actor_runtime import IRuntime
from ..DebugLog import Debug, Error, Info, Warn, shorten
from ..proto.Autogen_pb2 import GenReplyReq, GenReplyResp, PrepChat, ReceiveReq, Terminate
from .AG2CAP import AG2CAP
@@ -27,10 +27,10 @@ def __init__(self, ag_agent: ConversableAgent, the_other_name: str, init_chat: b
self.STATE = self.States.INIT
self._can2ag_name: str = self.actor_name + ".can2ag"
self._self_recursive: bool = self_recursive
- self._ensemble: ComponentEnsemble = None
+ self._ensemble: IRuntime = None
self._connectors = {}
- def on_connect(self, ensemble: ComponentEnsemble):
+ def on_connect(self, ensemble: IRuntime):
"""
Connect to the AutoGen system.
"""
@@ -38,7 +38,7 @@ def on_connect(self, ensemble: ComponentEnsemble):
self._ag2can_other_agent = AG2CAP(self._ensemble, self._other_agent_name)
Debug(self._can2ag_name, "connected to {ensemble}")
- def disconnect_network(self, ensemble: ComponentEnsemble):
+ def disconnect_network(self, ensemble: IRuntime):
"""
Disconnect from the AutoGen system.
"""
diff --git a/samples/apps/cap/py/autogencap/ag_adapter/CAPGroupChat.py b/samples/apps/cap/py/autogencap/ag_adapter/CAPGroupChat.py
index 5fad7b359e16..caf2a11a66f1 100644
--- a/samples/apps/cap/py/autogencap/ag_adapter/CAPGroupChat.py
+++ b/samples/apps/cap/py/autogencap/ag_adapter/CAPGroupChat.py
@@ -3,7 +3,8 @@
from autogen import Agent, AssistantAgent, GroupChat
from autogencap.ag_adapter.AG2CAP import AG2CAP
from autogencap.ag_adapter.CAP2AG import CAP2AG
-from autogencap.ComponentEnsemble import ComponentEnsemble
+
+from ..actor_runtime import IRuntime
class CAPGroupChat(GroupChat):
@@ -13,10 +14,10 @@ def __init__(
messages: List[str],
max_round: int,
chat_initiator: str,
- ensemble: ComponentEnsemble,
+ ensemble: IRuntime,
):
self.chat_initiator: str = chat_initiator
- self._cap_network: ComponentEnsemble = ensemble
+ self._cap_network: IRuntime = ensemble
self._cap_proxies: List[CAP2AG] = []
self._ag_proxies: List[AG2CAP] = []
self._ag_agents: List[Agent] = agents
diff --git a/samples/apps/cap/py/autogencap/ag_adapter/CAPGroupChatManager.py b/samples/apps/cap/py/autogencap/ag_adapter/CAPGroupChatManager.py
index 85a746d7c661..e71e6aecddf8 100644
--- a/samples/apps/cap/py/autogencap/ag_adapter/CAPGroupChatManager.py
+++ b/samples/apps/cap/py/autogencap/ag_adapter/CAPGroupChatManager.py
@@ -4,12 +4,13 @@
from autogencap.ActorConnector import ActorConnector
from autogencap.ag_adapter.CAP2AG import CAP2AG
from autogencap.ag_adapter.CAPGroupChat import CAPGroupChat
-from autogencap.ComponentEnsemble import ComponentEnsemble
+
+from ..actor_runtime import IRuntime
class CAPGroupChatManager:
- def __init__(self, groupchat: CAPGroupChat, llm_config: dict, network: ComponentEnsemble):
- self._ensemble: ComponentEnsemble = network
+ def __init__(self, groupchat: CAPGroupChat, llm_config: dict, network: IRuntime):
+ self._ensemble: IRuntime = network
self._cap_group_chat: CAPGroupChat = groupchat
self._ag_group_chat_manager: GroupChatManager = GroupChatManager(
groupchat=self._cap_group_chat, llm_config=llm_config
diff --git a/samples/apps/cap/py/autogencap/Constants.py b/samples/apps/cap/py/autogencap/constants.py
similarity index 76%
rename from samples/apps/cap/py/autogencap/Constants.py
rename to samples/apps/cap/py/autogencap/constants.py
index 8326d6753d35..217fcf45e619 100644
--- a/samples/apps/cap/py/autogencap/Constants.py
+++ b/samples/apps/cap/py/autogencap/constants.py
@@ -1,2 +1,3 @@
Termination_Topic: str = "Termination"
Directory_Svc_Topic: str = "Directory_Svc"
+ZMQ_Runtime: str = "ZMQ"
diff --git a/samples/apps/cap/py/autogencap/runtime_factory.py b/samples/apps/cap/py/autogencap/runtime_factory.py
new file mode 100644
index 000000000000..77fb091a248d
--- /dev/null
+++ b/samples/apps/cap/py/autogencap/runtime_factory.py
@@ -0,0 +1,47 @@
+from autogencap.actor_runtime import IRuntime
+from autogencap.constants import ZMQ_Runtime
+from autogencap.DebugLog import Error
+from autogencap.zmq_runtime import ZMQRuntime
+
+
+class RuntimeFactory:
+    """
+    Factory class for creating a runtime instance.
+    """
+
+    _supported_runtimes = {}
+
+ @staticmethod
+ def get_runtime(runtime_type: str = ZMQ_Runtime) -> IRuntime:
+        """
+        Returns the runtime instance registered for the given runtime type.
+
+        :param runtime_type: The type of runtime to retrieve.
+        :return: The runtime instance.
+        """
+ if runtime_type in RuntimeFactory._supported_runtimes:
+ return RuntimeFactory._supported_runtimes[runtime_type]
+ else:
+ not_found = f"Runtime type not found: {runtime_type}"
+ Error("RuntimeFactory", not_found)
+ raise ValueError(not_found)
+
+ @staticmethod
+ def register_runtime(runtime_type: str, runtime: IRuntime):
+ """
+ Registers a runtime instance.
+
+ :param runtime: The runtime instance.
+ """
+ RuntimeFactory._supported_runtimes[runtime_type] = runtime
+
+ @classmethod
+ def _initialize(cls):
+ """
+ Static initialization method.
+ """
+ cls.register_runtime(ZMQ_Runtime, ZMQRuntime())
+
+
+# Static initialization
+RuntimeFactory._initialize()
diff --git a/samples/apps/cap/py/autogencap/ComponentEnsemble.py b/samples/apps/cap/py/autogencap/zmq_runtime.py
similarity index 96%
rename from samples/apps/cap/py/autogencap/ComponentEnsemble.py
rename to samples/apps/cap/py/autogencap/zmq_runtime.py
index ebb31fb9aa7c..a8074fb48d3b 100644
--- a/samples/apps/cap/py/autogencap/ComponentEnsemble.py
+++ b/samples/apps/cap/py/autogencap/zmq_runtime.py
@@ -4,15 +4,16 @@
import zmq
from .Actor import Actor
+from .actor_runtime import IRuntime
from .ActorConnector import ActorConnector
from .Broker import Broker
-from .Constants import Termination_Topic
+from .constants import Termination_Topic
from .DebugLog import Debug, Warn
from .DirectorySvc import DirectorySvc
from .proto.CAP_pb2 import ActorInfo, ActorInfoCollection
-class ComponentEnsemble:
+class ZMQRuntime(IRuntime):
def __init__(self, name: str = "Local Actor Network", start_broker: bool = True):
self.local_actors = {}
self.name: str = name
diff --git a/samples/apps/cap/py/demo/AppAgents.py b/samples/apps/cap/py/demo/AppAgents.py
index bed163a293ee..f13e4b471fd6 100644
--- a/samples/apps/cap/py/demo/AppAgents.py
+++ b/samples/apps/cap/py/demo/AppAgents.py
@@ -5,9 +5,10 @@
"""
from autogencap.Actor import Actor
+from autogencap.actor_runtime import IRuntime
from autogencap.ActorConnector import ActorConnector
-from autogencap.ComponentEnsemble import ComponentEnsemble
from autogencap.DebugLog import Debug, Info, shorten
+from autogencap.runtime_factory import RuntimeFactory
class GreeterAgent(Actor):
@@ -136,7 +137,7 @@ def __init__(
self.quant: ActorConnector = None
self.risk_manager: ActorConnector = None
- def on_connect(self, network: ComponentEnsemble):
+ def on_connect(self, network: IRuntime):
"""
Connects the personal assistant to the specified local actor network.
@@ -150,7 +151,7 @@ def on_connect(self, network: ComponentEnsemble):
self.risk_manager = network.find_by_name("Risk Manager")
Debug(self.actor_name, "connected")
- def disconnect_network(self, network: ComponentEnsemble):
+ def disconnect_network(self, network: IRuntime):
"""
Disconnects the personal assistant from the specified local actor network.
diff --git a/samples/apps/cap/py/demo/CAPAutGenGroupDemo.py b/samples/apps/cap/py/demo/CAPAutGenGroupDemo.py
index 4c3aa1b80305..93a28c753ca5 100644
--- a/samples/apps/cap/py/demo/CAPAutGenGroupDemo.py
+++ b/samples/apps/cap/py/demo/CAPAutGenGroupDemo.py
@@ -1,7 +1,7 @@
from autogencap.ag_adapter.CAPGroupChat import CAPGroupChat
from autogencap.ag_adapter.CAPGroupChatManager import CAPGroupChatManager
-from autogencap.ComponentEnsemble import ComponentEnsemble
from autogencap.DebugLog import Info
+from autogencap.runtime_factory import RuntimeFactory
from autogen import AssistantAgent, UserProxyAgent, config_list_from_json
@@ -31,7 +31,7 @@ def cap_ag_group_demo():
system_message="Creative in software product ideas.",
llm_config=gpt4_config,
)
- ensemble = ComponentEnsemble()
+ ensemble = RuntimeFactory.get_runtime("ZMQ")
cap_groupchat = CAPGroupChat(
agents=[user_proxy, coder, pm], messages=[], max_round=12, ensemble=ensemble, chat_initiator=user_proxy.name
)
diff --git a/samples/apps/cap/py/demo/CAPAutoGenPairDemo.py b/samples/apps/cap/py/demo/CAPAutoGenPairDemo.py
index 00ff7a892878..77323ddf8707 100644
--- a/samples/apps/cap/py/demo/CAPAutoGenPairDemo.py
+++ b/samples/apps/cap/py/demo/CAPAutoGenPairDemo.py
@@ -2,8 +2,8 @@
import autogencap.DebugLog as DebugLog
from autogencap.ag_adapter.CAPPair import CAPPair
-from autogencap.ComponentEnsemble import ComponentEnsemble
from autogencap.DebugLog import ConsoleLogger, Info
+from autogencap.runtime_factory import RuntimeFactory
from autogen import AssistantAgent, UserProxyAgent, config_list_from_json
@@ -20,7 +20,7 @@ def cap_ag_pair_demo():
)
# Composable Agent Platform AutoGen Pair adapter
- ensemble = ComponentEnsemble()
+ ensemble = RuntimeFactory.get_runtime("ZMQ")
pair = CAPPair(ensemble, user_proxy, assistant)
user_cmd = "Plot a chart of MSFT daily closing prices for last 1 Month"
diff --git a/samples/apps/cap/py/demo/ComplexActorDemo.py b/samples/apps/cap/py/demo/ComplexActorDemo.py
index 6f6215daece8..b82d457cc67b 100644
--- a/samples/apps/cap/py/demo/ComplexActorDemo.py
+++ b/samples/apps/cap/py/demo/ComplexActorDemo.py
@@ -1,7 +1,7 @@
import time
from AppAgents import FidelityAgent, FinancialPlannerAgent, PersonalAssistant, QuantAgent, RiskManager
-from autogencap.ComponentEnsemble import ComponentEnsemble
+from autogencap.runtime_factory import RuntimeFactory
from termcolor import colored
@@ -14,7 +14,7 @@ def complex_actor_demo():
sends them to the personal assistant agent, and terminates
when the user enters "quit".
"""
- ensemble = ComponentEnsemble()
+ ensemble = RuntimeFactory.get_runtime("ZMQ")
# Register agents
ensemble.register(PersonalAssistant())
ensemble.register(FidelityAgent())
diff --git a/samples/apps/cap/py/demo/SimpleActorDemo.py b/samples/apps/cap/py/demo/SimpleActorDemo.py
index f0f081a5d630..afc398297268 100644
--- a/samples/apps/cap/py/demo/SimpleActorDemo.py
+++ b/samples/apps/cap/py/demo/SimpleActorDemo.py
@@ -1,5 +1,5 @@
from AppAgents import GreeterAgent
-from autogencap.ComponentEnsemble import ComponentEnsemble
+from autogencap.runtime_factory import RuntimeFactory
def simple_actor_demo():
@@ -8,10 +8,10 @@ def simple_actor_demo():
sending a message, and performing cleanup operations.
"""
# CAP Platform
- ensemble = ComponentEnsemble()
+ runtime = RuntimeFactory.get_runtime("ZMQ")
agent = GreeterAgent()
- ensemble.register(agent)
- ensemble.connect()
- greeter_link = ensemble.find_by_name("Greeter")
+ runtime.register(agent)
+ runtime.connect()
+ greeter_link = runtime.find_by_name("Greeter")
greeter_link.send_txt_msg("Hello World!")
- ensemble.disconnect()
+ runtime.disconnect()
diff --git a/samples/apps/cap/py/demo/list_agents.py b/samples/apps/cap/py/demo/list_agents.py
index 3a93dda29422..ca02006df2b7 100644
--- a/samples/apps/cap/py/demo/list_agents.py
+++ b/samples/apps/cap/py/demo/list_agents.py
@@ -2,9 +2,9 @@
from typing import List
from AppAgents import FidelityAgent, GreeterAgent
-from autogencap.ComponentEnsemble import ComponentEnsemble
from autogencap.DebugLog import Info
from autogencap.proto.CAP_pb2 import ActorInfo
+from autogencap.runtime_factory import RuntimeFactory
def list_agents():
@@ -14,7 +14,7 @@ def list_agents():
"""
# CAP Platform
- ensemble = ComponentEnsemble()
+ ensemble = RuntimeFactory.get_runtime("ZMQ")
# Register an actor
ensemble.register(GreeterAgent())
# Register an actor
diff --git a/samples/apps/cap/py/demo/single_threaded.py b/samples/apps/cap/py/demo/single_threaded.py
index d95f67128e64..f4c1fdfb633f 100644
--- a/samples/apps/cap/py/demo/single_threaded.py
+++ b/samples/apps/cap/py/demo/single_threaded.py
@@ -1,8 +1,8 @@
import _paths
from AppAgents import GreeterAgent
-from autogencap.ComponentEnsemble import ComponentEnsemble
from autogencap.DebugLog import Error
from autogencap.proto.CAP_pb2 import Ping
+from autogencap.runtime_factory import RuntimeFactory
def single_threaded_demo():
@@ -11,7 +11,7 @@ def single_threaded_demo():
sending a message, and performing cleanup operations.
"""
# CAP Platform
- ensemble = ComponentEnsemble()
+ ensemble = RuntimeFactory.get_runtime("ZMQ")
agent = GreeterAgent(start_thread=False)
ensemble.register(agent)
ensemble.connect()
diff --git a/samples/apps/cap/py/demo/standalone/UserProxy.py b/samples/apps/cap/py/demo/standalone/UserProxy.py
index 981198072e6f..c2eb4bf42385 100644
--- a/samples/apps/cap/py/demo/standalone/UserProxy.py
+++ b/samples/apps/cap/py/demo/standalone/UserProxy.py
@@ -2,9 +2,9 @@
import _paths
from autogencap.ag_adapter.CAP2AG import CAP2AG
-from autogencap.ComponentEnsemble import ComponentEnsemble
from autogencap.Config import IGNORED_LOG_CONTEXTS
from autogencap.DebugLog import Info
+from autogencap.runtime_factory import RuntimeFactory
from autogen import UserProxyAgent, config_list_from_json
@@ -23,7 +23,7 @@ def run(self):
is_termination_msg=lambda x: "TERMINATE" in x.get("content"),
)
# Composable Agent Network adapter
- ensemble = ComponentEnsemble()
+ ensemble = RuntimeFactory.get_runtime("ZMQ")
user_proxy_adptr = CAP2AG(ag_agent=user_proxy, the_other_name="assistant", init_chat=True, self_recursive=True)
ensemble.register(user_proxy_adptr)
ensemble.connect()
diff --git a/samples/apps/cap/py/demo/standalone/assistant.py b/samples/apps/cap/py/demo/standalone/assistant.py
index 789482e48881..162071e60898 100644
--- a/samples/apps/cap/py/demo/standalone/assistant.py
+++ b/samples/apps/cap/py/demo/standalone/assistant.py
@@ -2,8 +2,8 @@
import _paths
from autogencap.ag_adapter.CAP2AG import CAP2AG
-from autogencap.ComponentEnsemble import ComponentEnsemble
from autogencap.DebugLog import Info
+from autogencap.runtime_factory import RuntimeFactory
from autogen import AssistantAgent, config_list_from_json
@@ -18,7 +18,7 @@ def run(self):
config_list = config_list_from_json(env_or_file="OAI_CONFIG_LIST")
assistant = AssistantAgent("assistant", llm_config={"config_list": config_list})
# Composable Agent Network adapter
- ensemble = ComponentEnsemble()
+ ensemble = RuntimeFactory.get_runtime("ZMQ")
assistant_adptr = CAP2AG(ag_agent=assistant, the_other_name="user_proxy", init_chat=False, self_recursive=True)
ensemble.register(assistant_adptr)
ensemble.connect()
diff --git a/samples/apps/cap/py/demo/standalone/user_proxy.py b/samples/apps/cap/py/demo/standalone/user_proxy.py
index 4859361bb41b..d1183a7b7a6e 100644
--- a/samples/apps/cap/py/demo/standalone/user_proxy.py
+++ b/samples/apps/cap/py/demo/standalone/user_proxy.py
@@ -2,8 +2,8 @@
import _paths
from autogencap.ag_adapter.agent import Agent
-from autogencap.ComponentEnsemble import ComponentEnsemble
from autogencap.Config import IGNORED_LOG_CONTEXTS
+from autogencap.runtime_factory import RuntimeFactory
from autogen import UserProxyAgent
@@ -22,7 +22,7 @@ def main():
# Wrap AutoGen Agent in CAP
cap_user_proxy = Agent(user_proxy, counter_party_name="assistant", init_chat=True)
# Create the message bus
- ensemble = ComponentEnsemble()
+ ensemble = RuntimeFactory.get_runtime("ZMQ")
# Add the user_proxy to the message bus
cap_user_proxy.register(ensemble)
# Start message processing
diff --git a/test/agentchat/test_function_call.py b/test/agentchat/test_function_call.py
index 7023a709d3c1..7745181ea64a 100755
--- a/test/agentchat/test_function_call.py
+++ b/test/agentchat/test_function_call.py
@@ -91,6 +91,12 @@ def test_json_extraction():
jstr = '{"code": "a=\\"hello\\""}'
assert user._format_json_str(jstr) == '{"code": "a=\\"hello\\""}'
+ jstr = '{\n"tool": "python",\n"query": "print(\'hello\')\n\tprint(\'world\')"\n}' # mixed newlines and tabs
+ assert user._format_json_str(jstr) == '{"tool": "python","query": "print(\'hello\')\\n\\tprint(\'world\')"}'
+
+ jstr = "{}" # empty json
+ assert user._format_json_str(jstr) == "{}"
+
def test_execute_function():
from autogen.agentchat import UserProxyAgent
@@ -117,7 +123,7 @@ def add_num(num_to_be_added):
} # should be "given_num" with quotes
assert "The argument must be in JSON format." in user.execute_function(func_call=wrong_json_format)[1]["content"]
- # function execution error with wrong arguments passed
+ # function execution error with extra arguments
wrong_args = {"name": "add_num", "arguments": '{ "num_to_be_added": 5, "given_num": 10 }'}
assert "Error: " in user.execute_function(func_call=wrong_args)[1]["content"]
@@ -143,6 +149,19 @@ def get_number():
func_call = {"name": "get_number", "arguments": "{}"}
assert user.execute_function(func_call)[1]["content"] == "42"
+ # 4. test with a non-existent function
+ user = UserProxyAgent(name="test", function_map={})
+ func_call = {"name": "nonexistent_function", "arguments": "{}"}
+ assert "Error: Function" in user.execute_function(func_call=func_call)[1]["content"]
+
+ # 5. test calling a function that raises an exception
+ def raise_exception():
+ raise ValueError("This is an error")
+
+ user = UserProxyAgent(name="test", function_map={"raise_exception": raise_exception})
+ func_call = {"name": "raise_exception", "arguments": "{}"}
+ assert "Error: " in user.execute_function(func_call=func_call)[1]["content"]
+
@pytest.mark.asyncio
async def test_a_execute_function():
diff --git a/test/agentchat/test_tool_calls.py b/test/agentchat/test_tool_calls.py
index 6a12d2d96edf..e7d45c5918d9 100755
--- a/test/agentchat/test_tool_calls.py
+++ b/test/agentchat/test_tool_calls.py
@@ -208,6 +208,7 @@ class FakeAgent(autogen.Agent):
def __init__(self, name):
self._name = name
self.received = []
+ self.silent = False
@property
def name(self):
@@ -303,6 +304,7 @@ class FakeAgent(autogen.Agent):
def __init__(self, name):
self._name = name
self.received = []
+ self.silent = False
@property
def name(self):
diff --git a/website/docs/topics/llm_configuration.ipynb b/website/docs/topics/llm_configuration.ipynb
index f6f383cd85d8..0c094f6531ed 100644
--- a/website/docs/topics/llm_configuration.ipynb
+++ b/website/docs/topics/llm_configuration.ipynb
@@ -293,6 +293,126 @@
"}"
]
},
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Using Azure Active Directory (AAD) Authentication\n",
+ "\n",
+    "Azure Active Directory (AAD) provides secure access to resources and applications. Follow the steps below to configure AAD authentication for AutoGen.\n",
+ "\n",
+ "#### Prerequisites\n",
+ "- An Azure subscription - [Create one for free](https://azure.microsoft.com/en-us/free/).\n",
+ "- Access granted to the Azure OpenAI Service in the desired Azure subscription.\n",
+ "- Appropriate permissions to register an application in AAD.\n",
+ "- Custom subdomain names are required to enable features like Microsoft Entra ID for authentication.\n",
+ "- Azure CLI - [Installation Guide](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli).\n",
+ "\n",
+ "For more detailed and up-to-date instructions, please refer to the official [Azure OpenAI documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/).\n",
+ "\n",
+ "#### Step 1: Register an Application in AAD\n",
+ "1. Navigate to the [Azure portal](https://azure.microsoft.com/en-us/get-started/azure-portal).\n",
+ "2. Go to `Azure Active Directory` > `App registrations`.\n",
+ "3. Click on `New registration`.\n",
+ "4. Enter a name for your application.\n",
+ "5. Set the `Redirect URI` (optional).\n",
+ "6. Click `Register`.\n",
+ "\n",
+ "For detailed instructions, refer to the official [Azure AD Quickstart documentation](https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-register-app?tabs=certificate).\n",
+ "\n",
+ "#### Step 2: Configure API Permissions\n",
+ "1. After registration, go to `API permissions`.\n",
+ "2. Click `Add a permission`.\n",
+ "3. Select `Microsoft Graph` and then `Delegated permissions`.\n",
+ "4. Add the necessary permissions (e.g., `User.Read`).\n",
+ "\n",
+ "For more details, see [API permissions in Microsoft Graph](https://learn.microsoft.com/en-us/entra/identity-platform/permissions-consent-overview)\n",
+ "\n",
+ "#### Step 3: Obtain Client ID and Tenant ID\n",
+ "1. Go to `Overview` of your registered application.\n",
+ "2. Note down the `Application (client) ID` and `Directory (tenant) ID`.\n",
+ "\n",
+ "For more details, visit [Register an application with the Microsoft identity platform](https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-register-app?tabs=certificate)\n",
+ "\n",
+ "#### Step 4: Configure Your Application\n",
+    "Use the obtained `Client ID` and `Tenant ID` in your application configuration. Here's an example of how to do this in your configuration file:\n",
+ "```\n",
+ "aad_config = {\n",
+ " \"client_id\": \"YOUR_CLIENT_ID\",\n",
+ " \"tenant_id\": \"YOUR_TENANT_ID\",\n",
+ " \"authority\": \"https://login.microsoftonline.com/YOUR_TENANT_ID\",\n",
+ " \"scope\": [\"https://graph.microsoft.com/.default\"],\n",
+ "}\n",
+ "```\n",
+ "#### Step 5: Authenticate and Acquire Tokens\n",
+ "Use the following code to authenticate and acquire tokens:\n",
+ "\n",
+ "```\n",
+ "from msal import ConfidentialClientApplication\n",
+ "\n",
+ "app = ConfidentialClientApplication(\n",
+ " client_id=aad_config[\"client_id\"],\n",
+ " client_credential=\"YOUR_CLIENT_SECRET\",\n",
+ " authority=aad_config[\"authority\"]\n",
+ ")\n",
+ "\n",
+ "result = app.acquire_token_for_client(scopes=aad_config[\"scope\"])\n",
+ "\n",
+ "if \"access_token\" in result:\n",
+ " print(\"Token acquired\")\n",
+ "else:\n",
+ " print(\"Error acquiring token:\", result.get(\"error\"))\n",
+ "```\n",
+ "\n",
+ "For more details, refer to the [Authenticate and authorize in Azure OpenAI Service](https://learn.microsoft.com/en-us/azure/api-management/api-management-authenticate-authorize-azure-openai) and [How to configure Azure OpenAI Service with Microsoft Entra ID authentication](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/managed-identity).\n",
+ "\n",
+ "\n",
+ "#### Step 6: Configure Azure OpenAI with AAD Auth in AutoGen\n",
+ "To use AAD authentication with Azure OpenAI in AutoGen, configure the `llm_config` with the necessary parameters.\n",
+ "\n",
+ "Here is an example configuration:\n",
+ "\n",
+ "```\n",
+ "llm_config = {\n",
+ " \"config_list\": [\n",
+ " {\n",
+ " \"model\": \"gpt-4\",\n",
+ " \"base_url\": \"YOUR_BASE_URL\",\n",
+ " \"api_type\": \"azure\",\n",
+ " \"api_version\": \"2024-02-01\",\n",
+ " \"max_tokens\": 1000,\n",
+ " \"azure_ad_token_provider\": \"DEFAULT\"\n",
+ " }\n",
+ " ]\n",
+ "}\n",
+ "```\n",
+ "\n",
+ "For more details, refer to the [Authenticate and authorize in Azure OpenAI Service](https://learn.microsoft.com/en-us/azure/api-management/api-management-authenticate-authorize-azure-openai) and [How to configure Azure OpenAI Service with Microsoft Entra ID authentication](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/managed-identity).\n",
+ "\n",
+ "In this configuration:\n",
+ "- `model`: The Azure OpenAI deployment name.\n",
+ "- `base_url`: The base URL of the Azure OpenAI endpoint.\n",
+ "- `api_type`: Should be set to \"azure\".\n",
+ "- `api_version`: The API version to use.\n",
+ "- `azure_ad_token_provider`: Set to \"DEFAULT\" to use the default token provider.\n",
+ "\n",
+ "#### Example of Initializing an Assistant Agent with AAD Auth\n",
+ "```\n",
+ "import autogen\n",
+ "\n",
+ "# Initialize the assistant agent with the AAD authenticated config\n",
+ "assistant = autogen.AssistantAgent(name=\"assistant\", llm_config=llm_config)\n",
+ "```\n",
+ "\n",
+ "#### Troubleshooting\n",
+ "If you encounter issues, check the following:\n",
+ "- Ensure your `Client ID` and `Tenant ID` are correct.\n",
+ "- Verify the permissions granted to your application.\n",
+ "- Check network connectivity and Azure service status.\n",
+ "\n",
+    "This documentation provides a complete guide to configuring and using AAD authentication with Azure OpenAI in AutoGen.\n"
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {},