Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Added base group chat using Flow class #82

Closed
wants to merge 8 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .gitignore
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
__pycache__/
.venv/

.idea
.env

image/
Expand Down
64 changes: 64 additions & 0 deletions playground/models/autotemp.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
from swarms.models import OpenAIChat # Replace with your actual OpenAIChat import
from termcolor import colored

class MultiTempAgent:
    """Generate text at a default temperature and, if the user rejects it,
    retry at several alternative temperatures, then either auto-select the
    best-scored output or let the user pick one.

    Args:
        api_key: OpenAI API key forwarded to ``OpenAIChat``.
        default_temp: Temperature used for the first generation attempt.
        alt_temps: Alternative temperatures to try on rejection
            (defaults to ``[0.1, 0.3, 0.7, 0.9]``).
        auto_select: When True, pick the highest LLM-scored output
            automatically; otherwise present the options to the user.
    """

    def __init__(self, api_key, default_temp=0.5, alt_temps=None, auto_select=True):
        self.api_key = api_key
        self.default_temp = default_temp
        # Default alternative temperatures when the caller supplies none.
        self.alt_temps = alt_temps if alt_temps else [0.1, 0.3, 0.7, 0.9]
        self.auto_select = auto_select

    def ask_user_feedback(self, text):
        """Show *text* and return True if the user answers 'yes'."""
        print(colored("Generated text:", "green"))
        print(colored(text, "white"))
        feedback = input(colored("Are you satisfied with this output? (yes/no): ", "green"))
        return feedback.lower() == 'yes'

    def present_options_to_user(self, outputs):
        """Print each alternative output and let the user pick by temperature."""
        print(colored("Alternative outputs:", "green"))
        for temp, output in outputs.items():
            print(colored(f"Temperature {temp}:", "green") + colored(f" {output}", "blue"))
        chosen_temp = float(input(colored("Choose the temperature of the output you like: ", "green")))
        return outputs.get(chosen_temp, "Invalid temperature chosen.")

    def run(self, prompt):
        """Run the generate/score/select loop for *prompt* and return the chosen text."""
        try:
            llm = OpenAIChat(openai_api_key=self.api_key, temperature=self.default_temp)
            initial_output = llm(prompt)
        except Exception as e:
            print(colored(f"Error generating initial output: {e}", "red"))
            initial_output = None

        # BUG FIX: don't show a failed (None) generation to the user for feedback.
        user_satisfied = initial_output is not None and self.ask_user_feedback(initial_output)
        if user_satisfied:
            return initial_output

        outputs = {}
        scores = {}
        for temp in self.alt_temps:
            try:
                llm = OpenAIChat(openai_api_key=self.api_key, temperature=temp)
                outputs[temp] = llm(prompt)
            except Exception as e:
                print(colored(f"Error generating text at temperature {temp}: {e}", "red"))
                outputs[temp] = None
                scores[temp] = 0
                continue
            # BUG FIX: score in its own try-block so a scoring failure (e.g. a
            # non-integer reply to the rating prompt) no longer discards a
            # successful generation.
            try:
                eval_prompt = f"Rate the quality of the following output for our specific task on a scale from 1 to 10. Output only an integer. The output is: {outputs[temp]}"
                score_str = llm(eval_prompt)
                scores[temp] = int(score_str.strip())
            except Exception as e:
                print(colored(f"Error scoring output at temperature {temp}: {e}", "red"))
                scores[temp] = 0

        # Robustness: with no alternative temperatures there is nothing to rank.
        if not scores:
            return initial_output

        if self.auto_select:
            best_temp = max(scores, key=scores.get)
            print(colored(f"Automatically selected output from Temperature {best_temp}:", "green"))
            print(colored(outputs[best_temp], "white"))
            return outputs[best_temp]
        else:
            chosen_output = self.present_options_to_user(outputs)
            return chosen_output

if __name__ == "__main__":
    import os

    # SECURITY FIX: an OpenAI API key was hard-coded here and committed to
    # version control (it must be revoked). Read the key from the environment
    # instead.
    api_key = os.environ.get("OPENAI_API_KEY", "")
    agent = MultiTempAgent(api_key, auto_select=True)  # Set auto_select to False if you want manual selection
    prompt = "Write a creative short story about a purple dragon"
    final_output = agent.run(prompt)
    # BUG FIX: the result was computed but never shown.
    print(final_output)
Binary file not shown.
30 changes: 26 additions & 4 deletions swarms/structs/flow.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,13 @@
- Add configurable save and restore so the user can restore from previous flows
- Add memory vector database retrieval
"""

import asyncio
import copy
import json
import logging
import time
from typing import Any, Callable, Dict, List, Optional, Tuple, Generator
from collections import defaultdict
from typing import Any, Callable, Dict, List, Optional, Tuple, Generator, Union
from termcolor import colored
import inspect
import random
Expand Down Expand Up @@ -103,7 +105,8 @@ def __init__(
retry_interval: int = 1,
interactive: bool = False,
dashboard: bool = False,
# tools: List[BaseTool] = None,
name: str = "flow-agent",
system_message: str = FLOW_SYSTEM_PROMPT,
dynamic_temperature: bool = False,
**kwargs: Any,
):
Expand All @@ -121,7 +124,8 @@ def __init__(
self.interactive = interactive
self.dashboard = dashboard
self.dynamic_temperature = dynamic_temperature
# self.tools = tools
self.system_message = system_message
self.name = name

def provide_feedback(self, feedback: str) -> None:
"""Allow users to provide feedback on the responses."""
Expand Down Expand Up @@ -257,6 +261,7 @@ def run(self, task: str, **kwargs):
)
# print(f"Next query: {response}")
# break

if self.interactive:
print(f"AI: {response}")
history.append(f"AI: {response}")
Expand Down Expand Up @@ -475,3 +480,20 @@ def streamed_token_generation(self, prompt: str) -> Generator[str, None, None]:
for token in tokens:
time.sleep(0.1)
yield token

def generate_reply(self, history: str, **kwargs) -> Dict[str, str]:
    """Build a prompt from the system message and *history*, query the LLM,
    and return the reply as a chat message.

    Args:
        history: The formatted conversation history so far.
        **kwargs: Extra keyword arguments forwarded to the LLM call.

    Returns:
        dict: ``{"role": <agent name>, "content": <LLM response>}``.

    BUG FIX: the return type was annotated ``str`` although a dict is
    returned; the annotation is corrected here.
    """
    prompt = f"""SYSTEM_PROMPT:{self.system_message}
History:
{history}
Your response:"""
    response = self.llm(prompt, **kwargs)
    return {"role": self.name, "content": response}


def update_system_message(self, system_message: str) -> None:
    """Replace this agent's system message with *system_message*."""
    self.system_message = system_message
224 changes: 140 additions & 84 deletions swarms/swarms/groupchat.py
Original file line number Diff line number Diff line change
@@ -1,89 +1,145 @@
from swarms.agents import SimpleAgent
from termcolor import colored
from dataclasses import dataclass
import sys
from typing import Dict, List, Optional, Union
import logging

from swarms import Flow, OpenAI

class GroupChat:
    """
    Groupchat

    Args:
        agents (list): List of agents
        dashboard (bool): Whether to print a dashboard or not

    Example:
        >>> from swarms.structs import Flow
        >>> from swarms.models import OpenAIChat
        >>> from swarms.swarms.groupchat import GroupChat
        >>> from swarms.agents import SimpleAgent
        >>> api_key = ""
        >>> llm = OpenAIChat()
        >>> agent1 = SimpleAgent("Captain Price", Flow(llm=llm, max_loops=4))
        >>> agent2 = SimpleAgent("John Mactavis", Flow(llm=llm, max_loops=4))
        >>> chat = GroupChat([agent1, agent2])
        >>> chat.assign_duty(agent1.name, "Buy the groceries")
        >>> chat.assign_duty(agent2.name, "Clean the house")
        >>> response = chat.run("Captain Price", "Hello, how are you John?")
        >>> print(response)
    """

    def __init__(self, agents, dashboard: bool = False):
        # Ensure that all provided agents are instances of SimpleAgent
        if not all(isinstance(agent, SimpleAgent) for agent in agents):
            raise ValueError("All agents must be instances of SimpleAgent")
        self.agents = {agent.name: agent for agent in agents}

        # Dictionary to store duties for each agent (agent_name -> duty text)
        self.duties = {}

        # Dictionary to store roles for each agent (agent_name -> role text)
        self.roles = {}

        self.dashboard = dashboard

    def assign_duty(self, agent_name, duty):
        """Assigns duty to the agent"""
        if agent_name not in self.agents:
            raise ValueError(f"No agent named {agent_name} found.")
        # BUG FIX: the duty was validated but never stored, so run() could
        # never see it. Persist it, mirroring assign_role.
        self.duties[agent_name] = duty

    def assign_role(self, agent_name, role):
        """Assigns a role to the specified agent"""
        if agent_name not in self.agents:
            raise ValueError(f"No agent named {agent_name} found")

        self.roles[agent_name] = role

    def run(self, sender_name: str, message: str):
        """Runs the groupchat: every agent except the sender replies to the message."""
        if self.dashboard:
            # BUG FIX: previously the return value of print() (always None)
            # was captured and printed again, emitting a stray "None" line.
            print(
                colored(
                    f"""

        Groupchat Configuration:
        ------------------------

        Agents: {self.agents}
        Message: {message}
        Sender: {sender_name}
        """,
                    "red",
                )
            )

        responses = {}
        for agent_name, agent in self.agents.items():
            if agent_name != sender_name:
                # BUG FIX: build a per-agent prompt instead of appending to the
                # shared `message`, which leaked one agent's duty/role into
                # every later agent's prompt.
                agent_message = message
                if agent_name in self.duties:
                    agent_message += f"Your duty is {self.duties[agent_name]}"
                if agent_name in self.roles:
                    agent_message += (
                        f"You are the {self.roles[agent_name]} in this conversation"
                    )
                responses[agent_name] = agent.run(agent_message)
        return responses


logger = logging.getLogger(__name__)
@dataclass
class GroupChat:
    """A group chat: a list of agents, a shared message history, and a cap on
    the number of conversation rounds."""

    # NOTE: forward-reference strings keep this module importable even though
    # Flow is defined in another module.
    agents: List["Flow"]
    messages: List[Dict]
    max_round: int = 10
    admin_name: str = "Admin"  # the name of the admin agent

    @property
    def agent_names(self) -> List[str]:
        """Return the names of the agents in the group chat."""
        return [agent.name for agent in self.agents]

    def reset(self):
        """Reset the group chat by clearing the shared message history."""
        self.messages.clear()

    def agent_by_name(self, name: str) -> "Flow":
        """Find an agent whose name is contained within the given 'name' string."""
        for agent in self.agents:
            if agent.name in name:
                return agent
        raise ValueError(f"No agent found with a name contained in '{name}'.")

    def next_agent(self, agent: "Flow") -> "Flow":
        """Return the agent after *agent* in the list, wrapping around."""
        return self.agents[(self.agent_names.index(agent.name) + 1) % len(self.agents)]

    def select_speaker_msg(self):
        """Return the system message used to ask the selector for the next speaker."""
        return f"""You are in a role play game. The following roles are available:
{self._participant_roles()}.

Read the following conversation.
Then select the next role from {self.agent_names} to play. Only return the role."""

    def select_speaker(self, last_speaker: "Flow", selector: "Flow"):
        """Ask *selector* to pick the next speaker; fall back to round-robin."""
        selector.update_system_message(self.select_speaker_msg())

        # Warn if GroupChat is underpopulated, without established changing behavior
        n_agents = len(self.agent_names)
        if n_agents < 3:
            logger.warning(
                f"GroupChat is underpopulated with {n_agents} agents. Direct communication would be more efficient."
            )

        name = selector.generate_reply(
            self.format_history(
                self.messages
                + [
                    {
                        "role": "system",
                        "content": f"Read the above conversation. Then select the next most suitable role from {self.agent_names} to play. Only return the role.",
                    }
                ]
            )
        )
        try:
            # generate_reply returns {"role": ..., "content": ...}; the chosen
            # agent name is embedded in the content.
            return self.agent_by_name(name['content'])
        except ValueError:
            # Selector gave an unusable answer: just take the next agent in order.
            return self.next_agent(last_speaker)

    def _participant_roles(self):
        # One "name: system_message" line per agent, fed to the selector prompt.
        return "\n".join([f"{agent.name}: {agent.system_message}" for agent in self.agents])

    def format_history(self, messages: List[Dict]) -> str:
        """Render *messages* as newline-separated 'role:content' lines.

        BUG FIX: the original f-string started with a stray single quote,
        prefixing every rendered line with "'".
        """
        formatted_messages = []
        for message in messages:
            formatted_message = f"{message['role']}:{message['content']}"
            formatted_messages.append(formatted_message)
        return '\n'.join(formatted_messages)

class GroupChatManager:
    """Drives a GroupChat: seeds the history with a task, then runs rounds of
    speaker selection and reply generation."""

    def __init__(self, groupchat: "GroupChat", selector: "Flow"):
        # groupchat: the GroupChat holding the agents and shared history
        # selector: the agent asked to choose the next speaker each round
        self.groupchat = groupchat
        self.selector = selector

    def run_chat(self, task: str):
        """Run up to ``max_round`` rounds of conversation and return the last reply."""
        self.groupchat.messages.append({'role': self.selector.name, 'content': task})
        # BUG FIX: initialize so max_round == 0 returns None instead of
        # raising UnboundLocalError.
        reply = None
        for _ in range(self.groupchat.max_round):
            speaker = self.groupchat.select_speaker(
                last_speaker=self.selector, selector=self.selector
            )
            reply = speaker.generate_reply(
                self.groupchat.format_history(self.groupchat.messages)
            )
            self.groupchat.messages.append(reply)
            print(reply)
            # NOTE: the original `if i == max_round - 1: break` was dead code —
            # the loop terminates at that point anyway.

        return reply


# Demo of GroupChat + GroupChatManager.
# SECURITY FIX: an OpenAI API key was hard-coded here and committed to version
# control (it must be revoked); the key is now read from the environment.
# BUG FIX: the demo previously ran at import time of this library module; it
# is now guarded so importing swarms.swarms.groupchat has no side effects.
if __name__ == "__main__":
    import os

    llm = OpenAI(
        openai_api_key=os.environ.get("OPENAI_API_KEY", ""),
        temperature=0.5,
        max_tokens=3000,
    )

    # Initialize the flows: three participants with distinct personas.
    flow1 = Flow(
        llm=llm,
        max_loops=1,
        system_message="YOU ARE SILLY, YOU OFFER NOTHING OF VALUE",
        name='silly',
        dashboard=True,
    )
    flow2 = Flow(
        llm=llm,
        max_loops=1,
        system_message="YOU ARE VERY SMART AND ANSWER RIDDLES",
        name='detective',
        dashboard=True,
    )
    flow3 = Flow(
        llm=llm,
        max_loops=1,
        system_message="YOU MAKE RIDDLES",
        name='riddler',
    )
    # The manager flow is used as the speaker selector.
    manager = Flow(
        llm=llm,
        max_loops=1,
        system_message="YOU ARE A GROUP CHAT MANAGER",
        name='manager',
        dashboard=True,
    )

    # Example usage:
    agents = [flow1, flow2, flow3]

    group_chat = GroupChat(agents=agents, messages=[], max_round=5)
    chat_manager = GroupChatManager(groupchat=group_chat, selector=manager)
    chat_history = chat_manager.run_chat("Write me a riddle and answer it")
Loading