diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml
index 6e563af26..c0ad132e3 100644
--- a/.github/workflows/python-package.yml
+++ b/.github/workflows/python-package.yml
@@ -1,6 +1,8 @@
 # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
 # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
 
+# [ ] TODO [pep 458](https://blog.pypi.org/posts/2024-11-14-pypi-now-supports-digital-attestations/)
+
 name: Python package
 
 on:
@@ -16,7 +18,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.9", "3.10", "3.11", "3.12"]
+        python-version: ["3.10", "3.11", "3.12"]
 
     steps:
     - uses: actions/checkout@v4
diff --git a/.gitignore b/.gitignore
index 89b0cdc7f..9f6e25b6b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,7 +8,8 @@ audio/
 video/
 artifacts_three
 dataframe/
-
+.ruff_cache
+.pytest_cache
 static/generated
 runs
 Financial-Analysis-Agent_state.json
diff --git a/README.md b/README.md
index 6469883d8..378b8c54b 100644
--- a/README.md
+++ b/README.md
@@ -131,13 +131,24 @@ The `run` method is the primary entry point for executing tasks with an `Agent`
 
 ```python
 from swarms import Agent
+from swarms.models import OpenAIChat
+
+model = OpenAIChat(
+    openai_api_key="your-api-key",
+    model_name="gpt-4o-mini",
+    temperature=0.1
+)
 
 agent = Agent(
     agent_name="Stock-Analysis-Agent",
-    model_name="gpt-4o-mini",
+    system_prompt="You are a stock market analysis expert. Analyze market trends and provide insights.",
+    llm=model,
     max_loops="auto",
     interactive=True,
     streaming_on=True,
+    verbose=True,
+    autosave=True,
+    saved_state_path="stock_analysis_agent.json"
 )
 
 agent.run("What is the current market trend for tech stocks?")
@@ -150,29 +161,31 @@ The `Agent` class offers a range of settings to tailor its behavior to specific
 | Setting | Description | Default Value |
 | --- | --- | --- |
 | `agent_name` | The name of the agent. | "DefaultAgent" |
-| `system_prompt` | The system prompt to use for the agent. | "Default system prompt." |
-| `llm` | The language model to use for processing tasks. | `OpenAIChat` instance |
+| `system_prompt` | The system prompt to use for the agent. | None |
+| `llm` | The language model to use for processing tasks. | Required |
 | `max_loops` | The maximum number of loops to execute for a task. | 1 |
 | `autosave` | Enables or disables autosaving of the agent's state. | False |
 | `dashboard` | Enables or disables the dashboard for the agent. | False |
 | `verbose` | Controls the verbosity of the agent's output. | False |
+| `streaming_on` | Enables or disables response streaming. | True |
 | `dynamic_temperature_enabled` | Enables or disables dynamic temperature adjustment for the language model. | False |
-| `saved_state_path` | The path to save the agent's state. | "agent_state.json" |
-| `user_name` | The username associated with the agent. | "default_user" |
-| `retry_attempts` | The number of retry attempts for failed tasks. | 1 |
-| `context_length` | The maximum length of the context to consider for tasks. | 200000 |
-| `return_step_meta` | Controls whether to return step metadata in the output. | False |
-| `output_type` | The type of output to return (e.g., "json", "string"). | "string" |
+| `saved_state_path` | The path to save the agent's state. | None |
+| `user_name` | The username associated with the agent. | "User" |
+| `retry_attempts` | The number of retry attempts for failed tasks. | 3 |
+| `context_length` | The maximum length of the context to consider for tasks. | 8192 |
+| `multi_modal` | Enables or disables multimodal support. | False |
+| `code_interpreter` | Enables or disables code execution. | False |
+| `self_healing_enabled` | Enables or disables error recovery. | False |
+| `sentiment_threshold` | The threshold for response evaluation. | 0.7 |
+| `tags` | A list of strings for categorizing the agent. | None |
+| `use_cases` | A list of dictionaries documenting the agent's use cases. | None |
 
 ```python
 import os
 
 from swarms import Agent
-from swarm_models import OpenAIChat
-
-from swarms.prompts.finance_agent_sys_prompt import (
-    FINANCIAL_AGENT_SYS_PROMPT,
-)
+from swarms.models import OpenAIChat
+from swarms.prompts.finance_agent_sys_prompt import FINANCIAL_AGENT_SYS_PROMPT
 from dotenv import load_dotenv
 
 load_dotenv()
@@ -180,7 +193,7 @@ load_dotenv()
 # Get the OpenAI API key from the environment variable
 api_key = os.getenv("OPENAI_API_KEY")
 
-# Create an instance of the OpenAIChat class
+# Create model instance
 model = OpenAIChat(
     openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
 )
@@ -194,21 +207,29 @@ agent = Agent(
     autosave=True,
     dashboard=False,
     verbose=True,
+    streaming_on=True,
     dynamic_temperature_enabled=True,
     saved_state_path="finance_agent.json",
     user_name="swarms_corp",
-    retry_attempts=1,
+    retry_attempts=3,
     context_length=200000,
-    return_step_meta=False,
-    output_type="string",
-    streaming_on=False,
+    multi_modal=False,
+    code_interpreter=True,
+    self_healing_enabled=True,
+    sentiment_threshold=0.7,
+    tags=["finance", "analysis"],
+    use_cases=[{"name": "Financial Analysis", "description": "Analyze financial data and provide insights"}]
 )
+# Modern method usage
+agent.update_system_prompt("New system prompt")
+agent.update_max_loops(5)
+agent.update_loop_interval(2)
+agent.update_retry_attempts(5)
+print(agent.get_llm_parameters())
 
-agent.run(
-    "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria"
-)
-
+# Run the agent
+agent.run("Analyze the latest quarterly financial report for Tesla")
 ```
 -----
 
 ### Integrating RAG with Swarms for Enhanced Long-Term Memory
@@ -231,19 +252,22 @@ graph TD
 ```python
 import os
-from swarms_memory import ChromaDB
+from swarms.memory import ChromaDB
 
 # Initialize the ChromaDB client for long-term memory management
 chromadb = ChromaDB(
     metric="cosine",  # Metric for similarity measurement
     output_dir="finance_agent_rag",  # Directory for storing RAG data
-    # docs_folder="artifacts",  # Uncomment and specify the folder containing your documents
+    limit_tokens=1000,  # Maximum tokens per query
+    n_results=1,  # Number of results to retrieve
+    docs_folder=None,  # Optional folder containing documents to add
+    verbose=False  # Enable verbose logging if needed
 )
 ```
 
 **Step 2: Define the Model**
 
 ```python
-from swarm_models import Anthropic
+from swarms.models import Anthropic
 from swarms.prompts.finance_agent_sys_prompt import (
     FINANCIAL_AGENT_SYS_PROMPT,
 )
@@ -378,7 +402,7 @@ The following is an example of an agent that intakes a pydantic basemodel and ou
 ```python
 from pydantic import BaseModel, Field
 from swarms import Agent
-from swarm_models import Anthropic
+from swarms.models import OpenAIChat
 
 
 # Initialize the schema for the person's information
@@ -410,7 +434,11 @@ agent = Agent(
     ),
     # Set the tool schema to the JSON string -- this is the key difference
     tool_schema=tool_schema,
-    llm=Anthropic(),
+    llm=OpenAIChat(
+        openai_api_key="your-api-key",
+        model_name="gpt-4o",
+        temperature=0.1
+    ),
     max_loops=3,
     autosave=True,
     dashboard=False,
@@ -442,6 +470,7 @@ Run the agent with multiple modalities useful for various real-world tasks in ma
 import os
 from dotenv import load_dotenv
 from swarms import Agent
+from swarms.models import OpenAIChat
 from swarm_models import GPT4VisionAPI
 
 
@@ -450,7 +479,7 @@ load_dotenv()
 
 # Initialize the language model
-llm = GPT4VisionAPI(
+llm = OpenAIChat(
     openai_api_key=os.environ.get("OPENAI_API_KEY"),
     max_tokens=500,
 )
 
@@ -552,7 +581,7 @@ Steps:
 
 For example, here's an example on how to create an agent from griptape.
 
-Here’s how you can create a custom **Griptape** agent that integrates with the **Swarms** framework by inheriting from the `Agent` class in **Swarms** and overriding the `run(task: str) -> str` method.
+Here's how you can create a custom **Griptape** agent that integrates with the **Swarms** framework by inheriting from the `Agent` class in **Swarms** and overriding the `run(task: str) -> str` method.
 
 
 ```python
@@ -694,7 +723,7 @@ In this example, each `Agent` represents a task that is executed sequentially. T
 ```python
 import os
 from swarms import Agent, SequentialWorkflow
-from swarm_models import OpenAIChat
+from swarms.models import OpenAIChat
 
 # model = Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"))
 company = "Nvidia"
@@ -911,7 +940,7 @@ The `run` method returns the final output after all agents have processed the in
 
 
 from swarms import Agent, AgentRearrange
-from swarm_models import Anthropic
+from swarms.models import Anthropic
 
 
 # Initialize the director agent
@@ -1288,7 +1317,7 @@ The `run` method returns a dictionary containing the outputs of each agent that
 ```python
 import os
 from swarms import Agent
-from swarm_models import OpenAIChat
+from swarms.models import OpenAIChat
 from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm
 
 # Define custom system prompts for each social media platform
@@ -1613,7 +1642,7 @@ The `SwarmRouter` class is a flexible routing system designed to manage differen
 import os
 from dotenv import load_dotenv
 from swarms import Agent
-from swarm_models import OpenAIChat
+from swarms.models import OpenAIChat
 from swarms.structs.swarm_router import SwarmRouter, SwarmType
 
 load_dotenv()
diff --git a/docs/swarms/install/install.md b/docs/swarms/install/install.md
index f69a09bdd..9d52d84e5 100644
--- a/docs/swarms/install/install.md
+++ b/docs/swarms/install/install.md
@@ -127,7 +127,7 @@ Before you begin, ensure you have the following installed:
     poetry install --extras "desktop"
     ```
 
-=== "Using Docker"
+=== "Using Docker COMING SOON [DOES NOT WORK YET]"
 
     Docker is an excellent option for creating isolated and reproducible environments, suitable for both development and production.
 
diff --git a/swarms/structs/auto_swarm_builder.py b/swarms/structs/auto_swarm_builder.py
index 8d981dda0..93e542fd4 100644
--- a/swarms/structs/auto_swarm_builder.py
+++ b/swarms/structs/auto_swarm_builder.py
@@ -1,5 +1,3 @@
-from loguru import logger
-
 import os
 from typing import List