Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Hotfix using datetime.now().strftime() to format the time on spreadsheet_swarm.py #700

Open
wants to merge 7 commits into
base: master
Choose a base branch
from
4 changes: 1 addition & 3 deletions api/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -962,7 +962,6 @@ def create_app() -> FastAPI:
logger.info("FastAPI application created successfully")
return app


def run_server():
"""Run the API server"""
try:
Expand All @@ -974,8 +973,7 @@ def run_server():
asyncio.run(server.startup())
except Exception as e:
logger.error(f"Failed to start API: {str(e)}")
print(f"Error starting server: {str(e)}"

print(f"Error starting server: {str(e)}") # <-- Fixed here

if __name__ == "__main__":
run_server()
2 changes: 1 addition & 1 deletion new_features_examples/multi_tool_usage_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ class ToolDefinition:
callable: Optional[Callable] = None


def extract_type_hints(func: Callable) -> Dict[str, Any]:
def extract_type_hints(func: Callable) -> dict:
"""Extract parameter types from function type hints."""
return typing.get_type_hints(func)

Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -73,14 +73,14 @@ docstring_parser = "0.16" # TODO:
tiktoken = "*"
networkx = "*"
aiofiles = "*"
clusterops = "*"
# chromadb = "*"
reportlab = "*"
doc-master = "*"
rich = "*"
# sentence-transformers = "*"
swarm-models = "*"
termcolor = "*"
clusterops = { git = "https://github.com/patrickbdevaney/clusterops.git", branch = "main" }


# [tool.poetry.extras]
Expand Down
12 changes: 8 additions & 4 deletions swarms/structs/concurrent_workflow.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,26 +4,30 @@
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from typing import Any, Dict, List, Optional, Union
import concurrent

from pydantic import BaseModel, Field
from tenacity import retry, stop_after_attempt, wait_exponential

from swarms.structs.agent import Agent
from swarms.structs.base_swarm import BaseSwarm
from swarms.utils.file_processing import create_file_in_folder
import concurrent
from clusterops import (
execute_on_gpu,
execute_with_cpu_cores,
execute_on_multiple_gpus,
list_available_gpus,
)

from swarms.structs.agent import Agent
from swarms.structs.base_swarm import BaseSwarm
from swarms.utils.file_processing import create_file_in_folder

from swarms.utils.loguru_logger import initialize_logger
from swarms.structs.swarm_id_generator import generate_swarm_id

logger = initialize_logger(log_folder="concurrent_workflow")




class AgentOutputSchema(BaseModel):
run_id: Optional[str] = Field(
..., description="Unique ID for the run"
Expand Down
82 changes: 31 additions & 51 deletions swarms/structs/spreadsheet_swarm.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import asyncio
import csv
import datetime
from datetime import datetime
import os
import uuid
from typing import List, Union
Expand All @@ -16,16 +16,8 @@

logger = initialize_logger(log_folder="spreadsheet_swarm")

time = datetime.datetime.now().isoformat()
uuid_hex = uuid.uuid4().hex

# --------------- NEW CHANGE START ---------------
# Format time variable to be compatible across operating systems
formatted_time = datetime.now().strftime("%Y-%m-%dT%H-%M-%S")

# Create the save file path with the formatted time and UUID hex
self.save_file_path = f"spreadsheet_swarm_{formatted_time}_run_id_{uuid_hex}.csv"
# --------------- NEW CHANGE END ---------------
# Replace timestamp-based time with a UUID for file naming
run_id = uuid.uuid4().hex # Unique identifier for each run

class AgentOutput(BaseModel):
agent_name: str
Expand All @@ -36,13 +28,13 @@ class AgentOutput(BaseModel):

class SwarmRunMetadata(BaseModel):
run_id: str = Field(
default_factory=lambda: f"spreadsheet_swarm_run_{uuid_hex}"
default_factory=lambda: f"spreadsheet_swarm_run_{run_id}"
)
name: str
description: str
agents: List[str]
start_time: str = Field(
default_factory=lambda: time,
default_factory=lambda: str(datetime.now().timestamp()), # Numeric timestamp
description="The start time of the swarm run.",
)
end_time: str
Expand Down Expand Up @@ -73,7 +65,7 @@ class SpreadSheetSwarm(BaseSwarm):
def __init__(
self,
name: str = "Spreadsheet-Swarm",
description: str = "A swarm that that processes tasks concurrently using multiple agents and saves the metadata to a CSV file.",
description: str = "A swarm that processes tasks concurrently using multiple agents and saves the metadata to a CSV file.",
agents: Union[Agent, List[Agent]] = [],
autosave_on: bool = True,
save_file_path: str = None,
Expand All @@ -96,17 +88,18 @@ def __init__(
self.max_loops = max_loops
self.workspace_dir = workspace_dir

# --------------- NEW CHANGE START ---------------
# The save_file_path now uses the formatted_time and uuid_hex
self.save_file_path = f"spreadsheet_swarm_{formatted_time}_run_id_{uuid_hex}.csv"
# --------------- NEW CHANGE END ---------------
# Create a timestamp without colons or periods
timestamp = datetime.now().isoformat().replace(":", "_").replace(".", "_")

# Use this timestamp in the CSV filename
self.save_file_path = f"spreadsheet_swarm_{timestamp}_run_id_{run_id}.csv"

self.metadata = SwarmRunMetadata(
run_id=f"spreadsheet_swarm_run_{time}",
run_id=f"spreadsheet_swarm_run_{run_id}",
name=name,
description=description,
agents=[agent.name for agent in agents],
start_time=time,
start_time=str(datetime.now().timestamp()), # Numeric timestamp
end_time="",
tasks_completed=0,
outputs=[],
Expand Down Expand Up @@ -149,12 +142,18 @@ def run(self, task: str, *args, **kwargs):

"""
logger.info(f"Running the swarm with task: {task}")
self.metadata.start_time = time
self.metadata.start_time = str(datetime.now().timestamp()) # Numeric timestamp

# Run the asyncio event loop
asyncio.run(self._run_tasks(task, *args, **kwargs))
# Check if we're already in an event loop
if asyncio.get_event_loop().is_running():
# If so, create and run tasks directly using `create_task` without `asyncio.run`
task_future = asyncio.create_task(self._run_tasks(task, *args, **kwargs))
asyncio.get_event_loop().run_until_complete(task_future)
else:
# If no event loop is running, run using `asyncio.run`
asyncio.run(self._run_tasks(task, *args, **kwargs))

self.metadata.end_time = time
self.metadata.end_time = str(datetime.now().timestamp()) # Numeric timestamp

# Synchronously save metadata
logger.info("Saving metadata to CSV and JSON...")
Expand Down Expand Up @@ -229,7 +228,7 @@ def _track_output(self, agent_name: str, task: str, result: str):
agent_name=agent_name,
task=task,
result=result,
timestamp=time,
timestamp=str(datetime.now().timestamp()), # Numeric timestamp
)
)

Expand Down Expand Up @@ -265,38 +264,19 @@ async def _save_to_csv(self):
"""
Save the swarm metadata to a CSV file.
"""
logger.info(
f"Saving swarm metadata to: {self.save_file_path}"
)
logger.info(f"Saving swarm metadata to: {self.save_file_path}")
run_id = uuid.uuid4()

# Check if file exists before opening it
file_exists = os.path.exists(self.save_file_path)

async with aiofiles.open(
self.save_file_path, mode="a"
) as file:
writer = csv.writer(file)

async with aiofiles.open(self.save_file_path, mode="a") as file:
# Write header if file doesn't exist
if not file_exists:
await writer.writerow(
[
"Run ID",
"Agent Name",
"Task",
"Result",
"Timestamp",
]
)
header = "Run ID,Agent Name,Task,Result,Timestamp\n"
await file.write(header)

# Write each output as a new row
for output in self.metadata.outputs:
await writer.writerow(
[
str(run_id),
output.agent_name,
output.task,
output.result,
output.timestamp,
]
)
row = f"{run_id},{output.agent_name},{output.task},{output.result},{output.timestamp}\n"
await file.write(row)
4 changes: 2 additions & 2 deletions swarms/tools/func_calling_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -122,9 +122,9 @@ def prepare_output_for_output_model(
"""
if output_type == BaseModel:
return str_to_pydantic_model(output, output_type)
elif output_type == dict:
elif output_type is dict:
return dict_to_json_str(output)
elif output_type == str:
elif output_type is str:
return output
else:
return output
78 changes: 52 additions & 26 deletions tests/agent_evals/auto_test_eval.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,26 +93,45 @@ def __init__(
self.last_issue_time = datetime.now()

def _get_swarms_version(self) -> str:
"""Get the installed version of Swarms."""
try:
import swarms
"""Get the installed version of Swarms."""

Check failure

Code scanning / Pyre

Parsing failure Error test

Parsing failure [404]: expected an indented block after function definition on line 95
try:
import swarms

return swarms.__version__
except:
return "Unknown"
return swarms.__version__
except:
return "Unknown"

def _get_gpu_info(self) -> Tuple[bool, Optional[str]]:
"""Get GPU information and CUDA availability."""
try:
import torch
"""Get GPU information and CUDA availability."""
Fixed Show fixed Hide fixed
try:
import torch

cuda_available = torch.cuda.is_available()
if cuda_available:
gpu_info = torch.cuda.get_device_name(0)
return cuda_available, gpu_info
return False, None
except ModuleNotFoundError as e:
# Handle the case where 'torch' is not installed
print(f"Error: {e}")
return False, None
except RuntimeError as e:
# Handle CUDA-related errors
    print(f"Error: {e}")

def _get_swarms_version(self) -> str:
"""Get the installed version of Swarms."""
try:
import swarms

return swarms.__version__
except:
return "Unknown"

return False, None
except Exception as e:
# Catch any other exceptions
print(f"Unexpected error: {e}")
return False, None

cuda_available = torch.cuda.is_available()
if cuda_available:
gpu_info = torch.cuda.get_device_name(0)
return cuda_available, gpu_info
return False, None
except:
return False, None

def _get_system_info(self) -> SwarmSystemInfo:
"""Collect system and Swarms-specific information."""
Expand Down Expand Up @@ -199,16 +218,23 @@ def _format_swarms_issue_body(
"""

def _get_dependencies_info(self) -> str:
"""Get information about installed dependencies."""
try:
import pkg_resources

deps = []
for dist in pkg_resources.working_set:
deps.append(f"- {dist.key} {dist.version}")
return "\n".join(deps)
except:
return "Unable to fetch dependency information"
"""Get information about installed dependencies."""
try:
import pkg_resources

deps = []
for dist in pkg_resources.working_set:
deps.append(f"- {dist.key} {dist.version}")
return "\n".join(deps)
except ImportError as e:
# Handle the case where pkg_resources is not available
print(f"Error: {e}")
return "Unable to fetch dependency information"
except Exception as e:
# Catch any other unexpected exceptions
print(f"Unexpected error: {e}")
return "Unable to fetch dependency information"


# First, add this method to your SwarmsIssueReporter class
def _check_rate_limit(self) -> bool:
Expand Down
5 changes: 1 addition & 4 deletions tests/profiling_agent.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,12 @@
import time

start_time = time.time()

import os
import uuid
from swarms import Agent
from swarm_models import OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)

start_time = time.time()

# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
Expand Down
Loading
Loading