diff --git a/api/main.py b/api/main.py index 101236344..bc1a5d624 100644 --- a/api/main.py +++ b/api/main.py @@ -962,7 +962,6 @@ def create_app() -> FastAPI: logger.info("FastAPI application created successfully") return app - def run_server(): """Run the API server""" try: @@ -974,8 +973,7 @@ def run_server(): asyncio.run(server.startup()) except Exception as e: logger.error(f"Failed to start API: {str(e)}") - print(f"Error starting server: {str(e)}" - + print(f"Error starting server: {str(e)}") # <-- Fixed here if __name__ == "__main__": run_server() diff --git a/new_features_examples/multi_tool_usage_agent.py b/new_features_examples/multi_tool_usage_agent.py index 1af421e25..33c61abe8 100644 --- a/new_features_examples/multi_tool_usage_agent.py +++ b/new_features_examples/multi_tool_usage_agent.py @@ -19,7 +19,7 @@ class ToolDefinition: callable: Optional[Callable] = None -def extract_type_hints(func: Callable) -> Dict[str, Any]: +def extract_type_hints(func: Callable) -> dict: """Extract parameter types from function type hints.""" return typing.get_type_hints(func) diff --git a/pyproject.toml b/pyproject.toml index a2516e755..f486ab4ce 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -73,7 +73,6 @@ docstring_parser = "0.16" # TODO: tiktoken = "*" networkx = "*" aiofiles = "*" -clusterops = "*" # chromadb = "*" reportlab = "*" doc-master = "*" @@ -81,6 +80,7 @@ rich = "*" # sentence-transformers = "*" swarm-models = "*" termcolor = "*" +clusterops = { git = "https://github.com/patrickbdevaney/clusterops.git", branch = "main" } # [tool.poetry.extras] diff --git a/swarms/structs/concurrent_workflow.py b/swarms/structs/concurrent_workflow.py index 9df994c33..9b671448a 100644 --- a/swarms/structs/concurrent_workflow.py +++ b/swarms/structs/concurrent_workflow.py @@ -4,26 +4,30 @@ from concurrent.futures import ThreadPoolExecutor from datetime import datetime from typing import Any, Dict, List, Optional, Union +import concurrent from pydantic import BaseModel, 
Field from tenacity import retry, stop_after_attempt, wait_exponential -from swarms.structs.agent import Agent -from swarms.structs.base_swarm import BaseSwarm -from swarms.utils.file_processing import create_file_in_folder -import concurrent from clusterops import ( execute_on_gpu, execute_with_cpu_cores, execute_on_multiple_gpus, list_available_gpus, ) + +from swarms.structs.agent import Agent +from swarms.structs.base_swarm import BaseSwarm +from swarms.utils.file_processing import create_file_in_folder + from swarms.utils.loguru_logger import initialize_logger from swarms.structs.swarm_id_generator import generate_swarm_id logger = initialize_logger(log_folder="concurrent_workflow") + + class AgentOutputSchema(BaseModel): run_id: Optional[str] = Field( ..., description="Unique ID for the run" diff --git a/swarms/structs/spreadsheet_swarm.py b/swarms/structs/spreadsheet_swarm.py index 2e691f78f..8215bf2a8 100644 --- a/swarms/structs/spreadsheet_swarm.py +++ b/swarms/structs/spreadsheet_swarm.py @@ -1,6 +1,6 @@ import asyncio import csv -import datetime +from datetime import datetime import os import uuid from typing import List, Union @@ -16,16 +16,8 @@ logger = initialize_logger(log_folder="spreadsheet_swarm") -time = datetime.datetime.now().isoformat() -uuid_hex = uuid.uuid4().hex - -# --------------- NEW CHANGE START --------------- -# Format time variable to be compatible across operating systems -formatted_time = datetime.now().strftime("%Y-%m-%dT%H-%M-%S") - -# Create the save file path with the formatted time and UUID hex -self.save_file_path = f"spreadsheet_swarm_{formatted_time}_run_id_{uuid_hex}.csv" -# --------------- NEW CHANGE END --------------- +# Replace timestamp-based time with a UUID for file naming +run_id = uuid.uuid4().hex # Unique identifier for each run class AgentOutput(BaseModel): agent_name: str @@ -36,13 +28,13 @@ class AgentOutput(BaseModel): class SwarmRunMetadata(BaseModel): run_id: str = Field( - default_factory=lambda: 
f"spreadsheet_swarm_run_{uuid_hex}" + default_factory=lambda: f"spreadsheet_swarm_run_{run_id}" ) name: str description: str agents: List[str] start_time: str = Field( - default_factory=lambda: time, + default_factory=lambda: str(datetime.now().timestamp()), # Numeric timestamp description="The start time of the swarm run.", ) end_time: str @@ -73,7 +65,7 @@ class SpreadSheetSwarm(BaseSwarm): def __init__( self, name: str = "Spreadsheet-Swarm", - description: str = "A swarm that that processes tasks concurrently using multiple agents and saves the metadata to a CSV file.", + description: str = "A swarm that processes tasks concurrently using multiple agents and saves the metadata to a CSV file.", agents: Union[Agent, List[Agent]] = [], autosave_on: bool = True, save_file_path: str = None, @@ -96,17 +88,18 @@ def __init__( self.max_loops = max_loops self.workspace_dir = workspace_dir - # --------------- NEW CHANGE START --------------- - # The save_file_path now uses the formatted_time and uuid_hex - self.save_file_path = f"spreadsheet_swarm_{formatted_time}_run_id_{uuid_hex}.csv" - # --------------- NEW CHANGE END --------------- + # Create a timestamp without colons or periods + timestamp = datetime.now().isoformat().replace(":", "_").replace(".", "_") + + # Use this timestamp in the CSV filename + self.save_file_path = f"spreadsheet_swarm_{timestamp}_run_id_{run_id}.csv" self.metadata = SwarmRunMetadata( - run_id=f"spreadsheet_swarm_run_{time}", + run_id=f"spreadsheet_swarm_run_{run_id}", name=name, description=description, agents=[agent.name for agent in agents], - start_time=time, + start_time=str(datetime.now().timestamp()), # Numeric timestamp end_time="", tasks_completed=0, outputs=[], @@ -149,12 +142,18 @@ def run(self, task: str, *args, **kwargs): """ logger.info(f"Running the swarm with task: {task}") - self.metadata.start_time = time + self.metadata.start_time = str(datetime.now().timestamp()) # Numeric timestamp - # Run the asyncio event loop - 
asyncio.run(self._run_tasks(task, *args, **kwargs)) + # Run the async tasks. Calling `run_until_complete` on an already-running + # loop raises RuntimeError, so when a loop is active, execute the coroutine + # on a private event loop in a worker thread and block until it finishes. + try: + asyncio.get_running_loop() + in_running_loop = True + except RuntimeError: + in_running_loop = False + if in_running_loop: + import threading + runner = threading.Thread( + target=asyncio.run, + args=(self._run_tasks(task, *args, **kwargs),), + ) + runner.start() + runner.join() + else: + # If no event loop is running, run using `asyncio.run` + asyncio.run(self._run_tasks(task, *args, **kwargs)) - self.metadata.end_time = time + self.metadata.end_time = str(datetime.now().timestamp()) # Numeric timestamp # Synchronously save metadata logger.info("Saving metadata to CSV and JSON...") @@ -229,7 +228,7 @@ def _track_output(self, agent_name: str, task: str, result: str): agent_name=agent_name, task=task, result=result, - timestamp=time, + timestamp=str(datetime.now().timestamp()), # Numeric timestamp ) ) @@ -265,38 +264,19 @@ async def _save_to_csv(self): """ Save the swarm metadata to a CSV file. """ - logger.info( - f"Saving swarm metadata to: {self.save_file_path}" - ) + logger.info(f"Saving swarm metadata to: {self.save_file_path}") run_id = uuid.uuid4() # Check if file exists before opening it file_exists = os.path.exists(self.save_file_path) - async with aiofiles.open( - self.save_file_path, mode="a" - ) as file: - writer = csv.writer(file) - + async with aiofiles.open(self.save_file_path, mode="a") as file: # Write header if file doesn't exist if not file_exists: - await writer.writerow( - [ - "Run ID", - "Agent Name", - "Task", - "Result", - "Timestamp", - ] - ) + header = "Run ID,Agent Name,Task,Result,Timestamp\n" + await file.write(header) + # Write each output as a new row for output in self.metadata.outputs: - await writer.writerow( - [ - str(run_id), - output.agent_name, - output.task, - output.result, - output.timestamp, - ] - ) + row = f"{run_id},{output.agent_name},{output.task},{output.result},{output.timestamp}\n" + await file.write(row) \ No newline at end of file diff 
--git a/swarms/tools/func_calling_utils.py b/swarms/tools/func_calling_utils.py index c5a5bd83d..b02a95ec4 100644 --- a/swarms/tools/func_calling_utils.py +++ b/swarms/tools/func_calling_utils.py @@ -122,9 +122,9 @@ def prepare_output_for_output_model( """ if output_type == BaseModel: return str_to_pydantic_model(output, output_type) - elif output_type == dict: + elif output_type is dict: return dict_to_json_str(output) - elif output_type == str: + elif output_type is str: return output else: return output diff --git a/tests/agent_evals/auto_test_eval.py b/tests/agent_evals/auto_test_eval.py index b9c770fab..c327197bf 100644 --- a/tests/agent_evals/auto_test_eval.py +++ b/tests/agent_evals/auto_test_eval.py @@ -93,26 +93,37 @@ def __init__( self.last_issue_time = datetime.now() def _get_swarms_version(self) -> str: - """Get the installed version of Swarms.""" - try: - import swarms + """Get the installed version of Swarms.""" + try: + import swarms - return swarms.__version__ - except: - return "Unknown" + return swarms.__version__ + except: + return "Unknown" def _get_gpu_info(self) -> Tuple[bool, Optional[str]]: - """Get GPU information and CUDA availability.""" - try: - import torch + """Get GPU information and CUDA availability.""" + try: + import torch + + cuda_available = torch.cuda.is_available() + if cuda_available: + gpu_info = torch.cuda.get_device_name(0) + return cuda_available, gpu_info + return False, None + except ModuleNotFoundError as e: + # Handle the case where 'torch' is not installed + print(f"Error: {e}") + return False, None + except RuntimeError as e: + # Handle CUDA-related errors + print(f"Error: {e}") + return False, None + except Exception as e: + # Catch any other exceptions + print(f"Unexpected error: {e}") + return False, None - cuda_available = torch.cuda.is_available() 
- if cuda_available: - gpu_info = torch.cuda.get_device_name(0) - return cuda_available, gpu_info - return False, None - except: - return False, None def _get_system_info(self) -> SwarmSystemInfo: """Collect system and Swarms-specific information.""" @@ -199,16 +218,23 @@ def _format_swarms_issue_body( """ def _get_dependencies_info(self) -> str: - """Get information about installed dependencies.""" - try: - import pkg_resources - - deps = [] - for dist in pkg_resources.working_set: - deps.append(f"- {dist.key} {dist.version}") - return "\n".join(deps) - except: - return "Unable to fetch dependency information" + """Get information about installed dependencies.""" + try: + import pkg_resources + + deps = [] + for dist in pkg_resources.working_set: + deps.append(f"- {dist.key} {dist.version}") + return "\n".join(deps) + except ImportError as e: + # Handle the case where pkg_resources is not available + print(f"Error: {e}") + return "Unable to fetch dependency information" + except Exception as e: + # Catch any other unexpected exceptions + print(f"Unexpected error: {e}") + return "Unable to fetch dependency information" + # First, add this method to your SwarmsIssueReporter class def _check_rate_limit(self) -> bool: diff --git a/tests/profiling_agent.py b/tests/profiling_agent.py index 8f1b02206..cb04552c9 100644 --- a/tests/profiling_agent.py +++ b/tests/profiling_agent.py @@ -1,7 +1,4 @@ import time - -start_time = time.time() - import os import uuid from swarms import Agent @@ -9,7 +6,7 @@ from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) - +start_time = time.time() # Get the OpenAI API key from the environment variable api_key = os.getenv("OPENAI_API_KEY") diff --git a/tests/structs/test_spreadsheet_swarm.py b/tests/structs/test_spreadsheet_swarm.py new file mode 100644 index 000000000..4f1346c1c --- /dev/null +++ b/tests/structs/test_spreadsheet_swarm.py @@ -0,0 +1,65 @@ +import os +from datetime import datetime +from uuid 
import uuid4 +# Import necessary classes from your swarm module +from swarms.structs.agent import Agent +from swarms.structs.base_swarm import BaseSwarm +from swarms.telemetry.capture_sys_data import log_agent_data +from swarms.utils.file_processing import create_file_in_folder +from swarms import SpreadSheetSwarm +# Ensure you have an environment variable or default workspace dir +workspace_dir = os.getenv("WORKSPACE_DIR", "./workspace") +def create_agents(num_agents: int): + """ + Create a list of agent instances. + + Args: + num_agents (int): The number of agents to create. + + Returns: + List[Agent]: List of created Agent objects. + """ + agents = [] + for i in range(num_agents): + agent_name = f"Agent-{i + 1}" + agents.append(Agent(agent_name=agent_name)) + return agents +def main(): + # Number of agents to create + num_agents = 5 + # Create the agents + agents = create_agents(num_agents) + # Initialize the swarm with agents and other configurations + swarm = SpreadSheetSwarm( + name="Test-Swarm", + description="A swarm for testing purposes.", + agents=agents, + autosave_on=True, + max_loops=2, + workspace_dir=workspace_dir + ) + # Run a sample task in the swarm (synchronously) + task = "process_data" + + # Ensure the run method is synchronous + swarm_metadata = swarm.run(task) # Assuming this is made synchronous + # Print swarm metadata after task completion + print("Swarm Metadata:") + print(swarm_metadata) + # Check if CSV file has been created and saved + if os.path.exists(swarm.save_file_path): + print(f"Metadata saved to: {swarm.save_file_path}") + else: + print(f"Metadata not saved correctly. 
Check the save path.") + # Test saving metadata to JSON file + swarm.data_to_json_file() + # Test exporting metadata to JSON + swarm_json = swarm.export_to_json() + print("Exported JSON metadata:") + print(swarm_json) + # Log agent data + print("Logging agent data:") + print(log_agent_data(swarm.metadata.model_dump())) +# Run the synchronous main function +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/tests/structs/test_spreadsheet_swarm_2.py b/tests/structs/test_spreadsheet_swarm_2.py new file mode 100644 index 000000000..4f1346c1c --- /dev/null +++ b/tests/structs/test_spreadsheet_swarm_2.py @@ -0,0 +1,65 @@ +import os +from datetime import datetime +from uuid import uuid4 +# Import necessary classes from your swarm module +from swarms.structs.agent import Agent +from swarms.structs.base_swarm import BaseSwarm +from swarms.telemetry.capture_sys_data import log_agent_data +from swarms.utils.file_processing import create_file_in_folder +from swarms import SpreadSheetSwarm +# Ensure you have an environment variable or default workspace dir +workspace_dir = os.getenv("WORKSPACE_DIR", "./workspace") +def create_agents(num_agents: int): + """ + Create a list of agent instances. + + Args: + num_agents (int): The number of agents to create. + + Returns: + List[Agent]: List of created Agent objects. 
+ """ + agents = [] + for i in range(num_agents): + agent_name = f"Agent-{i + 1}" + agents.append(Agent(agent_name=agent_name)) + return agents +def main(): + # Number of agents to create + num_agents = 5 + # Create the agents + agents = create_agents(num_agents) + # Initialize the swarm with agents and other configurations + swarm = SpreadSheetSwarm( + name="Test-Swarm", + description="A swarm for testing purposes.", + agents=agents, + autosave_on=True, + max_loops=2, + workspace_dir=workspace_dir + ) + # Run a sample task in the swarm (synchronously) + task = "process_data" + + # Ensure the run method is synchronous + swarm_metadata = swarm.run(task) # Assuming this is made synchronous + # Print swarm metadata after task completion + print("Swarm Metadata:") + print(swarm_metadata) + # Check if CSV file has been created and saved + if os.path.exists(swarm.save_file_path): + print(f"Metadata saved to: {swarm.save_file_path}") + else: + print(f"Metadata not saved correctly. Check the save path.") + # Test saving metadata to JSON file + swarm.data_to_json_file() + # Test exporting metadata to JSON + swarm_json = swarm.export_to_json() + print("Exported JSON metadata:") + print(swarm_json) + # Log agent data + print("Logging agent data:") + print(log_agent_data(swarm.metadata.model_dump())) +# Run the synchronous main function +if __name__ == "__main__": + main() \ No newline at end of file