diff --git a/agenta-cli/agenta/__init__.py b/agenta-cli/agenta/__init__.py index 53c65db70f..71097cd836 100644 --- a/agenta-cli/agenta/__init__.py +++ b/agenta-cli/agenta/__init__.py @@ -8,7 +8,6 @@ DictInput, MultipleChoice, FloatParam, - InFile, IntParam, MultipleChoiceParam, GroupedMultipleChoiceParam, @@ -28,6 +27,7 @@ from .sdk.utils.costs import calculate_token_usage from .sdk.client import Agenta from .sdk.litellm import litellm as callbacks +from .sdk.managers.vault import VaultManager from .sdk.managers.config import ConfigManager from .sdk.managers.variant import VariantManager from .sdk.managers.deployment import DeploymentManager diff --git a/agenta-cli/agenta/client/client.py b/agenta-cli/agenta/client/client.py index d5e4547f74..17dc1ac460 100644 --- a/agenta-cli/agenta/client/client.py +++ b/agenta-cli/agenta/client/client.py @@ -559,5 +559,5 @@ def run_evaluation(app_name: str, host: str, api_key: str = None) -> str: raise APIRequestError( f"Request to run evaluations failed with status code {response.status_code} and error message: {error_message}." 
) - print(response.json()) + return response.json() diff --git a/agenta-cli/agenta/sdk/__init__.py b/agenta-cli/agenta/sdk/__init__.py index c1e40757c4..6d3c4da842 100644 --- a/agenta-cli/agenta/sdk/__init__.py +++ b/agenta-cli/agenta/sdk/__init__.py @@ -8,7 +8,6 @@ DictInput, MultipleChoice, FloatParam, - InFile, IntParam, MultipleChoiceParam, GroupedMultipleChoiceParam, @@ -27,6 +26,7 @@ from .decorators.routing import entrypoint, app, route from .agenta_init import Config, AgentaSingleton, init as _init from .utils.costs import calculate_token_usage +from .managers.vault import VaultManager from .managers.config import ConfigManager from .managers.variant import VariantManager from .managers.deployment import DeploymentManager diff --git a/agenta-cli/agenta/sdk/agenta_init.py b/agenta-cli/agenta/sdk/agenta_init.py index c2180457c2..06659f4f4d 100644 --- a/agenta-cli/agenta/sdk/agenta_init.py +++ b/agenta-cli/agenta/sdk/agenta_init.py @@ -6,8 +6,9 @@ from agenta.sdk.utils.logging import log from agenta.sdk.utils.globals import set_global from agenta.client.backend.client import AgentaApi, AsyncAgentaApi + from agenta.sdk.tracing import Tracing -from agenta.client.exceptions import APIRequestError +from agenta.sdk.context.routing import routing_context class AgentaSingleton: @@ -59,9 +60,7 @@ def init( ValueError: If `app_id` is not specified either as an argument, in the config file, or in the environment variables. 
""" - log.info("---------------------------") - log.info("Agenta SDK - using version: %s", version("agenta")) - log.info("---------------------------") + log.info("Agenta - SDK version: %s", version("agenta")) config = {} if config_fname: @@ -86,6 +85,13 @@ def init( self.api_key = api_key or getenv("AGENTA_API_KEY") or config.get("api_key") + self.base_id = getenv("AGENTA_BASE_ID") + + self.service_id = getenv("AGENTA_SERVICE_ID") or self.base_id + + log.info("Agenta - Service ID: %s", self.service_id) + log.info("Agenta - Application ID: %s", self.app_id) + self.tracing = Tracing( url=f"{self.host}/api/observability/v1/otlp/traces", # type: ignore redact=redact, @@ -94,6 +100,7 @@ def init( self.tracing.configure( api_key=self.api_key, + service_id=self.service_id, # DEPRECATING app_id=self.app_id, ) @@ -108,8 +115,6 @@ def init( api_key=self.api_key if self.api_key else "", ) - self.base_id = getenv("AGENTA_BASE_ID") - self.config = Config( host=self.host, base_id=self.base_id, @@ -120,28 +125,43 @@ def init( class Config: def __init__( self, - host: str, + # LEGACY + host: Optional[str] = None, base_id: Optional[str] = None, - api_key: Optional[str] = "", + api_key: Optional[str] = None, + # LEGACY + **kwargs, ): - self.host = host + self.default_parameters = {**kwargs} + + def set_default(self, **kwargs): + self.default_parameters.update(kwargs) + + def get_default(self): + return self.default_parameters + + def __getattr__(self, key): + context = routing_context.get() + + parameters = context.parameters + + if not parameters: + return None + + if key in parameters: + value = parameters[key] + + if isinstance(value, dict): + nested_config = Config() + nested_config.set_default(**value) - self.base_id = base_id + return nested_config - if self.base_id is None: - # print( - # "Warning: Your configuration will not be saved permanently since base_id is not provided.\n" - # ) - pass + return value - if base_id is None or host is None: - self.persist = False - else: 
- self.persist = True - self.client = AgentaApi( - base_url=self.host + "/api", - api_key=api_key if api_key else "", - ) + return None + + ### --- LEGACY --- ### def register_default(self, overwrite=False, **kwargs): """alias for default""" @@ -153,104 +173,13 @@ def default(self, overwrite=False, **kwargs): overwrite: Whether to overwrite the existing configuration or not **kwargs: A dict containing the parameters """ - self.set( - **kwargs - ) # In case there is no connectivity, we still can use the default values - try: - self.push(config_name="default", overwrite=overwrite, **kwargs) - except Exception as ex: - log.warning( - "Unable to push the default configuration to the server. %s", str(ex) - ) - - def push(self, config_name: str, overwrite=True, **kwargs): - """Pushes the parameters for the app variant to the server - Args: - config_name: Name of the configuration to push to - overwrite: Whether to overwrite the existing configuration or not - **kwargs: A dict containing the parameters - """ - if not self.persist: - return - try: - self.client.configs.save_config( - base_id=self.base_id, - config_name=config_name, - parameters=kwargs, - overwrite=overwrite, - ) - except Exception as ex: - log.warning( - "Failed to push the configuration to the server with error: %s", ex - ) - - def pull( - self, config_name: str = "default", environment_name: Optional[str] = None - ): - """Pulls the parameters for the app variant from the server and sets them to the config""" - if not self.persist and ( - config_name != "default" or environment_name is not None - ): - raise ValueError( - "Cannot pull the configuration from the server since the app_name and base_name are not provided." 
- ) - if self.persist: - try: - if environment_name: - config = self.client.configs.get_config( - base_id=self.base_id, environment_name=environment_name - ) - - else: - config = self.client.configs.get_config( - base_id=self.base_id, - config_name=config_name, - ) - except Exception as ex: - log.warning( - "Failed to pull the configuration from the server with error: %s", - str(ex), - ) - try: - self.set(**{"current_version": config.current_version, **config.parameters}) - except Exception as ex: - log.warning("Failed to set the configuration with error: %s", str(ex)) + self.set(**kwargs) - def all(self): - """Returns all the parameters for the app variant""" - return { - k: v - for k, v in self.__dict__.items() - if k - not in [ - "app_name", - "base_name", - "host", - "base_id", - "api_key", - "persist", - "client", - ] - } - - # function to set the parameters for the app variant def set(self, **kwargs): - """Sets the parameters for the app variant - - Args: - **kwargs: A dict containing the parameters - """ - for key, value in kwargs.items(): - setattr(self, key, value) - - def dump(self): - """Returns all the information about the current version in the configuration. 
+ self.set_default(**kwargs) - Raises: - NotImplementedError: _description_ - """ - - raise NotImplementedError() + def all(self): + return self.default_parameters def init( diff --git a/agenta-cli/agenta/sdk/context/exporting.py b/agenta-cli/agenta/sdk/context/exporting.py new file mode 100644 index 0000000000..2fe03a09cd --- /dev/null +++ b/agenta-cli/agenta/sdk/context/exporting.py @@ -0,0 +1,25 @@ +from typing import Optional + +from contextlib import contextmanager +from contextvars import ContextVar + +from pydantic import BaseModel + + +class ExportingContext(BaseModel): + credentials: Optional[str] = None + + +exporting_context = ContextVar("exporting_context", default=ExportingContext()) + + +@contextmanager +def exporting_context_manager( + *, + context: Optional[ExportingContext] = None, +): + token = exporting_context.set(context) + try: + yield + finally: + exporting_context.reset(token) diff --git a/agenta-cli/agenta/sdk/context/routing.py b/agenta-cli/agenta/sdk/context/routing.py index 1d716a69ec..1284898289 100644 --- a/agenta-cli/agenta/sdk/context/routing.py +++ b/agenta-cli/agenta/sdk/context/routing.py @@ -1,24 +1,24 @@ +from typing import Any, Dict, List, Optional + from contextlib import contextmanager from contextvars import ContextVar -from typing import Any, Dict, Optional -routing_context = ContextVar("routing_context", default={}) +from pydantic import BaseModel + + +class RoutingContext(BaseModel): + parameters: Optional[Dict[str, Any]] = None + secrets: Optional[List[Any]] = None + + +routing_context = ContextVar("routing_context", default=RoutingContext()) @contextmanager def routing_context_manager( *, - config: Optional[Dict[str, Any]] = None, - application: Optional[Dict[str, Any]] = None, - variant: Optional[Dict[str, Any]] = None, - environment: Optional[Dict[str, Any]] = None, + context: Optional[RoutingContext] = None, ): - context = { - "config": config, - "application": application, - "variant": variant, - "environment": 
environment, - } token = routing_context.set(context) try: yield diff --git a/agenta-cli/agenta/sdk/context/tracing.py b/agenta-cli/agenta/sdk/context/tracing.py index 0585a014ad..3bebe13dc1 100644 --- a/agenta-cli/agenta/sdk/context/tracing.py +++ b/agenta-cli/agenta/sdk/context/tracing.py @@ -1,3 +1,28 @@ +from typing import Any, Dict, Optional + +from contextlib import contextmanager from contextvars import ContextVar -tracing_context = ContextVar("tracing_context", default={}) +from pydantic import BaseModel + + +class TracingContext(BaseModel): + credentials: Optional[str] = None + parameters: Optional[Dict[str, Any]] = None + references: Optional[Dict[str, Any]] = None + link: Optional[Dict[str, Any]] = None + + +tracing_context = ContextVar("tracing_context", default=TracingContext()) + + +@contextmanager +def tracing_context_manager( + *, + context: Optional[TracingContext] = None, +): + token = tracing_context.set(context) + try: + yield + finally: + tracing_context.reset(token) diff --git a/agenta-cli/agenta/sdk/decorators/routing.py b/agenta-cli/agenta/sdk/decorators/routing.py index 6be0c1c309..783533f651 100644 --- a/agenta-cli/agenta/sdk/decorators/routing.py +++ b/agenta-cli/agenta/sdk/decorators/routing.py @@ -1,30 +1,36 @@ from typing import Type, Any, Callable, Dict, Optional, Tuple, List -from annotated_types import Ge, Le, Gt, Lt -from pydantic import BaseModel, HttpUrl, ValidationError -from json import dumps from inspect import signature, iscoroutinefunction, Signature, Parameter, _empty -from argparse import ArgumentParser from functools import wraps -from asyncio import sleep, get_event_loop -from traceback import format_exc, format_exception -from pathlib import Path +from traceback import format_exception +from asyncio import sleep + from tempfile import NamedTemporaryFile -from os import environ +from annotated_types import Ge, Le, Gt, Lt +from pydantic import BaseModel, HttpUrl, ValidationError + +from fastapi import Body, FastAPI, 
UploadFile, HTTPException, Request -from fastapi.middleware.cors import CORSMiddleware -from fastapi import Body, FastAPI, UploadFile, HTTPException +from agenta.sdk.middleware.auth import AuthMiddleware +from agenta.sdk.middleware.otel import OTelMiddleware +from agenta.sdk.middleware.config import ConfigMiddleware +from agenta.sdk.middleware.vault import VaultMiddleware +from agenta.sdk.middleware.cors import CORSMiddleware -from agenta.sdk.middleware.auth import AuthorizationMiddleware -from agenta.sdk.context.routing import routing_context_manager, routing_context -from agenta.sdk.context.tracing import tracing_context +from agenta.sdk.context.routing import ( + routing_context_manager, + RoutingContext, +) +from agenta.sdk.context.tracing import ( + tracing_context_manager, + tracing_context, + TracingContext, +) from agenta.sdk.router import router -from agenta.sdk.utils import helpers -from agenta.sdk.utils.exceptions import suppress +from agenta.sdk.utils.exceptions import suppress, display_exception from agenta.sdk.utils.logging import log from agenta.sdk.types import ( DictInput, FloatParam, - InFile, IntParam, MultipleChoiceParam, MultipleChoice, @@ -39,19 +45,10 @@ import agenta as ag -AGENTA_USE_CORS = str(environ.get("AGENTA_USE_CORS", "true")).lower() in ( - "true", - "1", - "t", -) - app = FastAPI() log.setLevel("DEBUG") -_MIDDLEWARES = True - - app.include_router(router, prefix="") @@ -59,13 +56,17 @@ class PathValidator(BaseModel): url: HttpUrl -class route: +class route: # pylint: disable=invalid-name # This decorator is used to expose specific stages of a workflow (embedding, retrieval, summarization, etc.) # as independent endpoints. It is designed for backward compatibility with existing code that uses # the @entrypoint decorator, which has certain limitations. By using @route(), we can create new # routes without altering the main workflow entrypoint. 
This helps in modularizing the services # and provides flexibility in how we expose different functionalities as APIs. - def __init__(self, path, config_schema: BaseModel): + def __init__( + self, + path: Optional[str] = "/", + config_schema: Optional[BaseModel] = None, + ): self.config_schema: BaseModel = config_schema path = "/" + path.strip("/").strip() path = "" if path == "/" else path @@ -73,9 +74,13 @@ def __init__(self, path, config_schema: BaseModel): self.route_path = path + self.e = None + def __call__(self, f): self.e = entrypoint( - f, route_path=self.route_path, config_schema=self.config_schema + f, + route_path=self.route_path, + config_schema=self.config_schema, ) return f @@ -114,289 +119,258 @@ async def chain_of_prompts_llm(prompt: str): routes = list() + _middleware = False + _run_path = "/run" + _test_path = "/test" + # LEGACY + _legacy_playground_run_path = "/playground/run" + _legacy_generate_path = "/generate" + _legacy_generate_deployed_path = "/generate_deployed" + def __init__( self, func: Callable[..., Any], - route_path="", + route_path: str = "", config_schema: Optional[BaseModel] = None, ): - ### --- Update Middleware --- # - try: - global _MIDDLEWARES # pylint: disable=global-statement - - if _MIDDLEWARES: - app.add_middleware( - AuthorizationMiddleware, - host=ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.host, - resource_id=ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.app_id, - resource_type="application", - ) + self.func = func + self.route_path = route_path + self.config_schema = config_schema - if AGENTA_USE_CORS: - app.add_middleware( - CORSMiddleware, - allow_origins=["*"], - allow_methods=["*"], - allow_headers=["*"], - allow_credentials=True, - ) - - _MIDDLEWARES = False - - except: # pylint: disable=bare-except - log.warning("Agenta SDK - failed to secure route: %s", route_path) - ### --- Update Middleware --- # - - DEFAULT_PATH = "generate" - PLAYGROUND_PATH = "/playground" - RUN_PATH = "/run" - func_signature = signature(func) - try: - 
config = ( - config_schema() if config_schema else None - ) # we initialize the config object to be able to use it - except ValidationError as e: - raise ValueError( - f"Error initializing config_schema. Please ensure all required fields have default values: {str(e)}" - ) from e - except Exception as e: - raise ValueError( - f"Unexpected error initializing config_schema: {str(e)}" - ) from e - - config_params = config.dict() if config else ag.config.all() - ingestible_files = self.extract_ingestible_files(func_signature) + signature_parameters = signature(func).parameters + config, default_parameters = self.parse_config() - self.route_path = route_path + ### --- Middleware --- # + if not entrypoint._middleware: + entrypoint._middleware = True + + app.add_middleware(VaultMiddleware) + app.add_middleware(ConfigMiddleware) + app.add_middleware(AuthMiddleware) + app.add_middleware(OTelMiddleware) + app.add_middleware(CORSMiddleware) + ### ------------------ # - ### --- Playground --- # + ### --- Run --- # @wraps(func) - async def wrapper(*args, **kwargs) -> Any: - func_params, api_config_params = self.split_kwargs(kwargs, config_params) - self.ingest_files(func_params, ingestible_files) - if not config_schema: - ag.config.set(**api_config_params) - - with routing_context_manager( - config=api_config_params, - ): - entrypoint_result = await self.execute_function( - func, - True, # inline trace: True - *args, - params=func_params, - config_params=config_params, + async def run_wrapper(request: Request, *args, **kwargs) -> Any: + # LEGACY + # TODO: Removing this implies breaking changes in : + # - calls to /generate_deployed + kwargs = { + k: v + for k, v in kwargs.items() + if k not in ["config", "environment", "app"] + } + # LEGACY + + kwargs, _ = self.process_kwargs(kwargs, default_parameters) + if request.state.config["parameters"] is None: + raise HTTPException( + status_code=400, + detail="Config not found based on provided references.", ) - return entrypoint_result 
- self.update_function_signature( - wrapper=wrapper, - func_signature=func_signature, - config_class=config, - config_dict=config_params, - ingestible_files=ingestible_files, + return await self.execute_wrapper(request, False, *args, **kwargs) + + self.update_run_wrapper_signature(wrapper=run_wrapper) + + run_route = f"{entrypoint._run_path}{route_path}" + app.post(run_route, response_model=BaseResponse)(run_wrapper) + + # LEGACY + # TODO: Removing this implies breaking changes in : + # - calls to /generate_deployed must be replaced with calls to /run + if route_path == "": + run_route = entrypoint._legacy_generate_deployed_path + app.post(run_route, response_model=BaseResponse)(run_wrapper) + # LEGACY + ### ----------- # + + ### --- Test --- # + @wraps(func) + async def test_wrapper(request: Request, *args, **kwargs) -> Any: + kwargs, config = self.process_kwargs(kwargs, default_parameters) + + request.state.config["parameters"] = config + return await self.execute_wrapper(request, True, *args, **kwargs) + + self.update_test_wrapper_signature( + wrapper=test_wrapper, + config_instance=config ) - # + test_route = f"{entrypoint._test_path}{route_path}" + app.post(test_route, response_model=BaseResponse)(test_wrapper) + + # LEGACY + # TODO: Removing this implies breaking changes in : + # - calls to /generate must be replaced with calls to /test if route_path == "": - route = f"/{DEFAULT_PATH}" - app.post(route, response_model=BaseResponse)(wrapper) - entrypoint.routes.append( - { - "func": func.__name__, - "endpoint": route, - "params": ( - {**config_params, **func_signature.parameters} - if not config - else func_signature.parameters - ), - "config": config, - } - ) + test_route = entrypoint._legacy_generate_path + app.post(test_route, response_model=BaseResponse)(test_wrapper) + # LEGACY + + # LEGACY + # TODO: Removing this implies no breaking changes + if route_path == "": + test_route = entrypoint._legacy_playground_run_path + app.post(test_route, 
response_model=BaseResponse)(test_wrapper) + # LEGACY + ### ------------ # - route = f"{PLAYGROUND_PATH}{RUN_PATH}{route_path}" - app.post(route, response_model=BaseResponse)(wrapper) + ### --- OpenAPI --- # + test_route = f"{entrypoint._test_path}{route_path}" entrypoint.routes.append( { "func": func.__name__, - "endpoint": route, - "params": ( - {**config_params, **func_signature.parameters} - if not config - else func_signature.parameters - ), + "endpoint": test_route, + "params": signature_parameters, "config": config, } ) - ### ---------------------------- # - ### --- Deployed --- # - @wraps(func) - async def wrapper_deployed(*args, **kwargs) -> Any: - func_params = { - k: v - for k, v in kwargs.items() - if k not in ["config", "environment", "app"] - } - if not config_schema: - if "environment" in kwargs and kwargs["environment"] is not None: - ag.config.pull(environment_name=kwargs["environment"]) - elif "config" in kwargs and kwargs["config"] is not None: - ag.config.pull(config_name=kwargs["config"]) - else: - ag.config.pull(config_name="default") - - app_id = environ.get("AGENTA_APP_ID") - - with routing_context_manager( - application={ - "id": app_id, - "slug": kwargs.get("app"), - }, - variant={ - "slug": kwargs.get("config"), - }, - environment={ - "slug": kwargs.get("environment"), - }, - ): - entrypoint_result = await self.execute_function( - func, - False, # inline trace: False - *args, - params=func_params, - config_params=config_params, - ) - - return entrypoint_result - - self.update_deployed_function_signature( - wrapper_deployed, - func_signature, - ingestible_files, - ) + # LEGACY if route_path == "": - route_deployed = f"/{DEFAULT_PATH}_deployed" - app.post(route_deployed, response_model=BaseResponse)(wrapper_deployed) - - route_deployed = f"{RUN_PATH}{route_path}" - app.post(route_deployed, response_model=BaseResponse)(wrapper_deployed) - ### ---------------- # + test_route = entrypoint._legacy_generate_path + entrypoint.routes.append( + { + 
"func": func.__name__, + "endpoint": test_route, + "params": ( + {**default_parameters, **signature_parameters} + if not config + else signature_parameters + ), + "config": config, + } + ) + # LEGACY - ### --- Update OpenAPI --- # app.openapi_schema = None # Forces FastAPI to re-generate the schema openapi_schema = app.openapi() - # Inject the current version of the SDK into the openapi_schema - openapi_schema["agenta_sdk"] = {"version": helpers.get_current_version()} - - for route in entrypoint.routes: - self.override_schema( - openapi_schema=openapi_schema, - func_name=route["func"], - endpoint=route["endpoint"], - params=route["params"], - ) - if route["config"] is not None: # new SDK version + for _route in entrypoint.routes: + if _route["config"] is not None: self.override_config_in_schema( openapi_schema=openapi_schema, - func_name=route["func"], - endpoint=route["endpoint"], - config=route["config"], + func_name=_route["func"], + endpoint=_route["endpoint"], + config=_route["config"], ) - - if self.is_main_script(func) and route_path == "": - self.handle_terminal_run( - func, - func_signature.parameters, # type: ignore - config_params, - ingestible_files, - ) - - def extract_ingestible_files( - self, - func_signature: Signature, - ) -> Dict[str, Parameter]: - """Extract parameters annotated as InFile from function signature.""" - - return { - name: param - for name, param in func_signature.parameters.items() - if param.annotation is InFile - } - - def split_kwargs( - self, kwargs: Dict[str, Any], config_params: Dict[str, Any] + ### --------------- # + + def parse_config(self) -> Tuple[Optional[Type[BaseModel]], Dict[str, Any]]: + """Parse the config schema and return the config class and default parameters.""" + config = None + default_parameters = {} + + if self.config_schema: + try: + config = self.config_schema() if self.config_schema else None + default_parameters = config.dict() if config else {} + except ValidationError as e: + raise ValueError( + 
f"Error initializing config_schema. Please ensure all required fields have default values: {str(e)}" + ) from e + except Exception as e: + raise ValueError( + f"Unexpected error initializing config_schema: {str(e)}" + ) from e + + return config, default_parameters + + def process_kwargs( + self, kwargs: Dict[str, Any], default_parameters: Dict[str, Any] ) -> Tuple[Dict[str, Any], Dict[str, Any]]: - """Split keyword arguments into function parameters and API configuration parameters.""" - - func_params = {k: v for k, v in kwargs.items() if k not in config_params} - api_config_params = {k: v for k, v in kwargs.items() if k in config_params} - return func_params, api_config_params - - def ingest_file(self, upfile: UploadFile): - temp_file = NamedTemporaryFile(delete=False) - temp_file.write(upfile.file.read()) - temp_file.close() - return InFile(file_name=upfile.filename, file_path=temp_file.name) - - def ingest_files( + """Remove the config parameters from the kwargs.""" + # Extract agenta_config if present + config_params = kwargs.pop("agenta_config", {}) + if isinstance(config_params, BaseModel): + config_params = config_params.dict() + # Merge with default parameters + config = {**default_parameters, **config_params} + return kwargs, config + + async def execute_wrapper( self, - func_params: Dict[str, Any], - ingestible_files: Dict[str, Parameter], - ) -> None: - """Ingest files specified in function parameters.""" + request: Request, + inline: bool, + *args, + **kwargs, + ): + if not request: + raise HTTPException(status_code=500, detail="Missing 'request'.") + + state = request.state + credentials = state.auth.get("credentials") + parameters = state.config.get("parameters") + references = state.config.get("references") + secrets = state.vault.get("secrets") + + with routing_context_manager( + context=RoutingContext( + parameters=parameters, + secrets=secrets, + ) + ): + with tracing_context_manager( + context=TracingContext( + credentials=credentials, + 
parameters=parameters, + references=references, + ) + ): + result = await self.execute_function(inline, *args, **kwargs) - for name in ingestible_files: - if name in func_params and func_params[name] is not None: - func_params[name] = self.ingest_file(func_params[name]) + return result async def execute_function( self, - func: Callable[..., Any], - inline_trace, + inline: bool, *args, - **func_params, + **kwargs, ): - log.info("Agenta SDK - handling route: %s", repr(self.route_path or "/")) - - tracing_context.set(routing_context.get()) - try: result = ( - await func(*args, **func_params["params"]) - if iscoroutinefunction(func) - else func(*args, **func_params["params"]) + await self.func(*args, **kwargs) + if iscoroutinefunction(self.func) + else self.func(*args, **kwargs) ) - return await self.handle_success(result, inline_trace) + return await self.handle_success(result, inline) - except Exception as error: + except Exception as error: # pylint: disable=broad-except self.handle_failure(error) - async def handle_success(self, result: Any, inline_trace: bool): + async def handle_success( + self, + result: Any, + inline: bool, + ): data = None tree = None + content_type = "string" with suppress(): + if isinstance(result, (dict, list)): + content_type = "json" data = self.patch_result(result) - if inline_trace: - tree = await self.fetch_inline_trace(inline_trace) - - log.info(f"----------------------------------") - log.info(f"Agenta SDK - exiting with success: 200") - log.info(f"----------------------------------") + if inline: + tree = await self.fetch_inline_trace(inline) - return BaseResponse(data=data, tree=tree) + try: + return BaseResponse(data=data, tree=tree, content_type=content_type) + except: + return BaseResponse(data=data, content_type=content_type) - def handle_failure(self, error: Exception): - log.warning("--------------------------------------------------") - log.warning("Agenta SDK - handling application exception below:") - 
log.warning("--------------------------------------------------") - log.warning(format_exc().strip("\n")) - log.warning("--------------------------------------------------") + def handle_failure( + self, + error: Exception, + ): + display_exception("Application Exception") status_code = 500 message = str(error) @@ -405,7 +379,10 @@ def handle_failure(self, error: Exception): raise HTTPException(status_code=status_code, detail=detail) - def patch_result(self, result: Any): + def patch_result( + self, + result: Any, + ): """ Patch the result to only include the message if the result is a FuncResponse-style dictionary with message, cost, and usage keys. @@ -442,7 +419,10 @@ def patch_result(self, result: Any): return data - async def fetch_inline_trace(self, inline_trace): + async def fetch_inline_trace( + self, + inline, + ): WAIT_FOR_SPANS = True TIMEOUT = 1 TIMESTEP = 0.1 @@ -451,12 +431,14 @@ async def fetch_inline_trace(self, inline_trace): trace = None - root_context: Dict[str, Any] = tracing_context.get().get("root") + context = tracing_context.get() + + link = context.link - trace_id = root_context.get("trace_id") if root_context else None + trace_id = link.get("tree_id") if link else None if trace_id is not None: - if inline_trace: + if inline: if WAIT_FOR_SPANS: remaining_steps = NOFSTEPS @@ -476,6 +458,27 @@ async def fetch_inline_trace(self, inline_trace): return trace + # --- OpenAPI --- # + + def add_request_to_signature( + self, + wrapper: Callable[..., Any], + ): + original_sig = signature(wrapper) + parameters = [ + Parameter( + "request", + kind=Parameter.POSITIONAL_OR_KEYWORD, + annotation=Request, + ), + *original_sig.parameters.values(), + ] + new_sig = Signature( + parameters, + return_annotation=original_sig.return_annotation, + ) + wrapper.__signature__ = new_sig + def update_wrapper_signature( self, wrapper: Callable[..., Any], updated_params: List ): @@ -492,77 +495,57 @@ def update_wrapper_signature( wrapper_signature = 
wrapper_signature.replace(parameters=updated_params) wrapper.__signature__ = wrapper_signature # type: ignore - def update_function_signature( + def update_test_wrapper_signature( self, wrapper: Callable[..., Any], - func_signature: Signature, - config_class: Type[BaseModel], # TODO: change to our type - config_dict: Dict[str, Any], - ingestible_files: Dict[str, Parameter], + config_instance: Type[BaseModel], # TODO: change to our type ) -> None: """Update the function signature to include new parameters.""" updated_params: List[Parameter] = [] - if config_class: - self.add_config_params_to_parser(updated_params, config_class) - else: - self.deprecated_add_config_params_to_parser(updated_params, config_dict) - self.add_func_params_to_parser(updated_params, func_signature, ingestible_files) + self.add_config_params_to_parser(updated_params, config_instance) + self.add_func_params_to_parser(updated_params) self.update_wrapper_signature(wrapper, updated_params) + self.add_request_to_signature(wrapper) - def update_deployed_function_signature( + def update_run_wrapper_signature( self, wrapper: Callable[..., Any], - func_signature: Signature, - ingestible_files: Dict[str, Parameter], ) -> None: """Update the function signature to include new parameters.""" updated_params: List[Parameter] = [] - self.add_func_params_to_parser(updated_params, func_signature, ingestible_files) - for param in [ - "config", - "environment", - ]: # we add the config and environment parameters - updated_params.append( - Parameter( - name=param, - kind=Parameter.KEYWORD_ONLY, - default=Body(None), - annotation=str, - ) - ) + self.add_func_params_to_parser(updated_params) self.update_wrapper_signature(wrapper, updated_params) + self.add_request_to_signature(wrapper) def add_config_params_to_parser( - self, updated_params: list, config_class: Type[BaseModel] + self, updated_params: list, config_instance: Type[BaseModel] ) -> None: """Add configuration parameters to function signature.""" - for 
name, field in config_class.__fields__.items(): + for name, field in config_instance.__fields__.items(): assert field.default is not None, f"Field {name} has no default value" - updated_params.append( - Parameter( - name=name, - kind=Parameter.KEYWORD_ONLY, - annotation=field.annotation.__name__, - default=Body(field.default), - ) + updated_params.append( + Parameter( + name="agenta_config", + kind=Parameter.KEYWORD_ONLY, + annotation=type(config_instance), # Get the actual class type + default=Body(config_instance), # Use the instance directly ) + ) - def deprecated_add_config_params_to_parser( - self, updated_params: list, config_dict: Dict[str, Any] - ) -> None: - """Add configuration parameters to function signature.""" - for name, param in config_dict.items(): + def add_func_params_to_parser(self, updated_params: list) -> None: + """Add function parameters to function signature.""" + for name, param in signature(self.func).parameters.items(): assert ( - len(param.__class__.__bases__) == 1 - ), f"Inherited standard type of {param.__class__} needs to be one." + len(param.default.__class__.__bases__) == 1 + ), f"Inherited standard type of {param.default.__class__} needs to be one." updated_params.append( Parameter( - name=name, - kind=Parameter.KEYWORD_ONLY, - default=Body(param), - annotation=param.__class__.__bases__[ + name, + Parameter.KEYWORD_ONLY, + default=Body(..., embed=True), + annotation=param.default.__class__.__bases__[ 0 ], # determines and get the base (parent/inheritance) type of the sdk-type at run-time. 
\ # E.g __class__ is ag.MessagesInput() and accessing it parent type will return (,), \ @@ -570,144 +553,6 @@ def deprecated_add_config_params_to_parser( ) ) - def add_func_params_to_parser( - self, - updated_params: list, - func_signature: Signature, - ingestible_files: Dict[str, Parameter], - ) -> None: - """Add function parameters to function signature.""" - for name, param in func_signature.parameters.items(): - if name in ingestible_files: - updated_params.append( - Parameter(name, param.kind, annotation=UploadFile) - ) - else: - assert ( - len(param.default.__class__.__bases__) == 1 - ), f"Inherited standard type of {param.default.__class__} needs to be one." - updated_params.append( - Parameter( - name, - Parameter.KEYWORD_ONLY, - default=Body(..., embed=True), - annotation=param.default.__class__.__bases__[ - 0 - ], # determines and get the base (parent/inheritance) type of the sdk-type at run-time. \ - # E.g __class__ is ag.MessagesInput() and accessing it parent type will return (,), \ - # thus, why we are accessing the first item. - ) - ) - - def is_main_script(self, func: Callable) -> bool: - """ - Check if the script containing the function is the main script being run. - - Args: - func (Callable): The function object to check. - - Returns: - bool: True if the script containing the function is the main script, False otherwise. - - Example: - if is_main_script(my_function): - print("This is the main script.") - """ - return func.__module__ == "__main__" - - def handle_terminal_run( - self, - func: Callable, - func_params: Dict[str, Parameter], - config_params: Dict[str, Any], - ingestible_files: Dict, - ): - """ - Parses command line arguments and sets configuration when script is run from the terminal. - - Args: - func_params (dict): A dictionary containing the function parameters and their annotations. - config_params (dict): A dictionary containing the configuration parameters. 
- ingestible_files (dict): A dictionary containing the files that should be ingested. - """ - - # For required parameters, we add them as arguments - parser = ArgumentParser() - for name, param in func_params.items(): - if name in ingestible_files: - parser.add_argument(name, type=str) - else: - parser.add_argument(name, type=param.annotation) - - for name, param in config_params.items(): - if type(param) is MultipleChoiceParam: - parser.add_argument( - f"--{name}", - type=str, - default=param.default, - choices=param.choices, # type: ignore - ) - else: - parser.add_argument( - f"--{name}", - type=type(param), - default=param, - ) - - args = parser.parse_args() - - # split the arg list into the arg in the app_param and - # the args from the sig.parameter - args_config_params = {k: v for k, v in vars(args).items() if k in config_params} - args_func_params = { - k: v for k, v in vars(args).items() if k not in config_params - } - for name in ingestible_files: - args_func_params[name] = InFile( - file_name=Path(args_func_params[name]).stem, - file_path=args_func_params[name], - ) - - # Update args_config_params with default values from config_params if not provided in command line arguments - args_config_params.update( - { - key: value - for key, value in config_params.items() - if key not in args_config_params - } - ) - - loop = get_event_loop() - - with routing_context_manager(config=args_config_params): - result = loop.run_until_complete( - self.execute_function( - func, - True, # inline trace: True - **{"params": args_func_params, "config_params": args_config_params}, - ) - ) - - if result.trace: - log.info("\n========= Result =========\n") - - log.info(f"trace_id: {result.trace['trace_id']}") - log.info(f"latency: {result.trace.get('latency')}") - log.info(f"cost: {result.trace.get('cost')}") - log.info(f"usage: {list(result.trace.get('usage', {}).values())}") - - log.info(" ") - log.info("data:") - log.info(dumps(result.data, indent=2)) - - log.info(" ") - 
log.info("trace:") - log.info("----------------") - log.info(dumps(result.trace.get("spans", []), indent=2)) - log.info("----------------") - - log.info("\n==========================\n") - def override_config_in_schema( self, openapi_schema: dict, @@ -715,259 +560,29 @@ def override_config_in_schema( endpoint: str, config: Type[BaseModel], ): + """Override config in OpenAPI schema to add agenta-specific metadata.""" endpoint = endpoint[1:].replace("/", "_") - schema_to_override = openapi_schema["components"]["schemas"][ - f"Body_{func_name}_{endpoint}_post" - ]["properties"] - # New logic - for param_name, param_val in config.__fields__.items(): - if param_val.annotation is str: - if any( - isinstance(constraint, MultipleChoice) - for constraint in param_val.metadata - ): - choices = next( - constraint.choices - for constraint in param_val.metadata - if isinstance(constraint, MultipleChoice) - ) - if isinstance(choices, dict): - schema_to_override[param_name]["x-parameter"] = "grouped_choice" - schema_to_override[param_name]["choices"] = choices - elif isinstance(choices, list): - schema_to_override[param_name]["x-parameter"] = "choice" - schema_to_override[param_name]["enum"] = choices - else: - schema_to_override[param_name]["x-parameter"] = "text" - if param_val.annotation is bool: - schema_to_override[param_name]["x-parameter"] = "bool" - if param_val.annotation in (int, float): - schema_to_override[param_name]["x-parameter"] = ( - "int" if param_val.annotation is int else "float" - ) - # Check for greater than or equal to constraint - if any(isinstance(constraint, Ge) for constraint in param_val.metadata): - min_value = next( - constraint.ge - for constraint in param_val.metadata - if isinstance(constraint, Ge) - ) - schema_to_override[param_name]["minimum"] = min_value - # Check for greater than constraint - elif any( - isinstance(constraint, Gt) for constraint in param_val.metadata - ): - min_value = next( - constraint.gt - for constraint in 
param_val.metadata - if isinstance(constraint, Gt) - ) - schema_to_override[param_name]["exclusiveMinimum"] = min_value - # Check for less than or equal to constraint - if any(isinstance(constraint, Le) for constraint in param_val.metadata): - max_value = next( - constraint.le - for constraint in param_val.metadata - if isinstance(constraint, Le) - ) - schema_to_override[param_name]["maximum"] = max_value - # Check for less than constraint - elif any( - isinstance(constraint, Lt) for constraint in param_val.metadata - ): - max_value = next( - constraint.lt - for constraint in param_val.metadata - if isinstance(constraint, Lt) - ) - schema_to_override[param_name]["exclusiveMaximum"] = max_value - - def override_schema( - self, openapi_schema: dict, func_name: str, endpoint: str, params: dict - ): - """ - Overrides the default openai schema generated by fastapi with additional information about: - - The choices available for each MultipleChoiceParam instance - - The min and max values for each FloatParam instance - - The min and max values for each IntParam instance - - The default value for DictInput instance - - The default value for MessagesParam instance - - The default value for FileInputURL instance - - The default value for BinaryParam instance - - ... 
[PLEASE ADD AT EACH CHANGE] - - Args: - openapi_schema (dict): The openapi schema generated by fastapi - func (str): The name of the function to override - endpoint (str): The name of the endpoint to override - params (dict(param_name, param_val)): The dictionary of the parameters for the function - """ - - def find_in_schema( - schema_type_properties: dict, schema: dict, param_name: str, xparam: str - ): - """Finds a parameter in the schema based on its name and x-parameter value""" - for _, value in schema.items(): - value_title_lower = str(value.get("title")).lower() - value_title = ( - "_".join(value_title_lower.split()) - if len(value_title_lower.split()) >= 2 - else value_title_lower - ) - - if ( - isinstance(value, dict) - and schema_type_properties.get("x-parameter") == xparam - and value_title == param_name - ): - # this will update the default type schema with the properties gotten - # from the schema type (param_val) __schema_properties__ classmethod - for type_key, type_value in schema_type_properties.items(): - # BEFORE: - # value = {'temperature': {'title': 'Temperature'}} - value[type_key] = type_value - # AFTER: - # value = {'temperature': { "type": "number", "title": "Temperature", "x-parameter": "float" }} - return value - - def get_type_from_param(param_val): - param_type = "string" - annotation = param_val.annotation - - if annotation == int: - param_type = "integer" - elif annotation == float: - param_type = "number" - elif annotation == dict: - param_type = "object" - elif annotation == bool: - param_type = "boolean" - elif annotation == list: - param_type = "list" - elif annotation == str: - param_type = "string" - else: - print("ERROR, unhandled annotation:", annotation) - - return param_type - - # Goes from '/some/path' to 'some_path' - endpoint = endpoint[1:].replace("/", "_") - - schema_to_override = openapi_schema["components"]["schemas"][ - f"Body_{func_name}_{endpoint}_post" - ]["properties"] - - for param_name, param_val in 
params.items(): - if isinstance(param_val, GroupedMultipleChoiceParam): - subschema = find_in_schema( - param_val.__schema_type_properties__(), - schema_to_override, - param_name, - "grouped_choice", - ) - assert ( - subschema - ), f"GroupedMultipleChoiceParam '{param_name}' is in the parameters but could not be found in the openapi.json" - subschema["choices"] = param_val.choices # type: ignore - subschema["default"] = param_val.default # type: ignore - - elif isinstance(param_val, MultipleChoiceParam): - subschema = find_in_schema( - param_val.__schema_type_properties__(), - schema_to_override, - param_name, - "choice", - ) - default = str(param_val) - param_choices = param_val.choices # type: ignore - choices = ( - [default] + param_choices - if param_val not in param_choices - else param_choices - ) - subschema["enum"] = choices - subschema["default"] = ( - default if default in param_choices else choices[0] - ) - - elif isinstance(param_val, FloatParam): - subschema = find_in_schema( - param_val.__schema_type_properties__(), - schema_to_override, - param_name, - "float", - ) - subschema["minimum"] = param_val.minval # type: ignore - subschema["maximum"] = param_val.maxval # type: ignore - subschema["default"] = param_val - - elif isinstance(param_val, IntParam): - subschema = find_in_schema( - param_val.__schema_type_properties__(), - schema_to_override, - param_name, - "int", - ) - subschema["minimum"] = param_val.minval # type: ignore - subschema["maximum"] = param_val.maxval # type: ignore - subschema["default"] = param_val - - elif isinstance(param_val, Parameter) and param_val.annotation is DictInput: - subschema = find_in_schema( - param_val.annotation.__schema_type_properties__(), - schema_to_override, - param_name, - "dict", - ) - subschema["default"] = param_val.default["default_keys"] - - elif isinstance(param_val, TextParam): - subschema = find_in_schema( - param_val.__schema_type_properties__(), - schema_to_override, - param_name, - "text", - ) - 
subschema["default"] = param_val - - elif ( - isinstance(param_val, Parameter) - and param_val.annotation is MessagesInput - ): - subschema = find_in_schema( - param_val.annotation.__schema_type_properties__(), - schema_to_override, - param_name, - "messages", - ) - subschema["default"] = param_val.default - - elif ( - isinstance(param_val, Parameter) - and param_val.annotation is FileInputURL - ): - subschema = find_in_schema( - param_val.annotation.__schema_type_properties__(), - schema_to_override, - param_name, - "file_url", - ) - subschema["default"] = "https://example.com" - - elif isinstance(param_val, BinaryParam): - subschema = find_in_schema( - param_val.__schema_type_properties__(), - schema_to_override, - param_name, - "bool", - ) - subschema["default"] = param_val.default # type: ignore - else: - subschema = { - "title": str(param_name).capitalize(), - "type": get_type_from_param(param_val), - } - if param_val.default != _empty: - subschema["default"] = param_val.default # type: ignore - schema_to_override[param_name] = subschema + schema_key = f"Body_{func_name}_{endpoint}_post" + schema_to_override = openapi_schema["components"]["schemas"][schema_key] + + # Get the config class name to find its schema + config_class_name = type(config).__name__ + config_schema = openapi_schema["components"]["schemas"][config_class_name] + + # Process each field in the config class + for field_name, field in config.__class__.__fields__.items(): + # Check if field has Annotated metadata for MultipleChoice + if hasattr(field, "metadata") and field.metadata: + for meta in field.metadata: + if isinstance(meta, MultipleChoice): + choices = meta.choices + if isinstance(choices, dict): + config_schema["properties"][field_name].update({ + "x-parameter": "grouped_choice", + "choices": choices + }) + elif isinstance(choices, list): + config_schema["properties"][field_name].update({ + "x-parameter": "choice", + "enum": choices + }) diff --git 
a/agenta-cli/agenta/sdk/decorators/tracing.py b/agenta-cli/agenta/sdk/decorators/tracing.py index 68f707b694..f368509fc6 100644 --- a/agenta-cli/agenta/sdk/decorators/tracing.py +++ b/agenta-cli/agenta/sdk/decorators/tracing.py @@ -1,8 +1,12 @@ from typing import Callable, Optional, Any, Dict, List, Union + from functools import wraps from itertools import chain from inspect import iscoroutinefunction, getfullargspec +from opentelemetry import baggage as baggage +from opentelemetry.context import attach, detach + from agenta.sdk.utils.exceptions import suppress from agenta.sdk.context.tracing import tracing_context from agenta.sdk.tracing.conventions import parse_span_kind @@ -39,10 +43,12 @@ def __call__(self, func: Callable[..., Any]): is_coroutine_function = iscoroutinefunction(func) @wraps(func) - async def async_wrapper(*args, **kwargs): - async def _async_auto_instrumented(*args, **kwargs): + async def awrapper(*args, **kwargs): + async def aauto_instrumented(*args, **kwargs): self._parse_type_and_kind() + token = self._attach_baggage() + with ag.tracer.start_as_current_span(func.__name__, kind=self.kind): self._pre_instrument(func, *args, **kwargs) @@ -52,13 +58,17 @@ async def _async_auto_instrumented(*args, **kwargs): return result - return await _async_auto_instrumented(*args, **kwargs) + self._detach_baggage(token) + + return await aauto_instrumented(*args, **kwargs) @wraps(func) - def sync_wrapper(*args, **kwargs): - def _sync_auto_instrumented(*args, **kwargs): + def wrapper(*args, **kwargs): + def auto_instrumented(*args, **kwargs): self._parse_type_and_kind() + token = self._attach_baggage() + with ag.tracer.start_as_current_span(func.__name__, kind=self.kind): self._pre_instrument(func, *args, **kwargs) @@ -68,9 +78,11 @@ def _sync_auto_instrumented(*args, **kwargs): return result - return _sync_auto_instrumented(*args, **kwargs) + self._detach_baggage(token) + + return auto_instrumented(*args, **kwargs) - return async_wrapper if 
is_coroutine_function else sync_wrapper + return awrapper if is_coroutine_function else wrapper def _parse_type_and_kind(self): if not ag.tracing.get_current_span().is_recording(): @@ -78,6 +90,25 @@ def _parse_type_and_kind(self): self.kind = parse_span_kind(self.type) + def _attach_baggage(self): + context = tracing_context.get() + + references = context.references + + token = None + if references: + for k, v in references.items(): + token = attach(baggage.set_baggage(f"ag.refs.{k}", v)) + + return token + + def _detach_baggage( + self, + token, + ): + if token: + detach(token) + def _pre_instrument( self, func, @@ -86,29 +117,21 @@ def _pre_instrument( ): span = ag.tracing.get_current_span() + context = tracing_context.get() + with suppress(): + trace_id = span.context.trace_id + + ag.tracing.credentials[trace_id] = context.credentials + span.set_attributes( attributes={"node": self.type}, namespace="type", ) if span.parent is None: - rctx = tracing_context.get() - - span.set_attributes( - attributes={"configuration": rctx.get("config", {})}, - namespace="meta", - ) - span.set_attributes( - attributes={"environment": rctx.get("environment", {})}, - namespace="meta", - ) span.set_attributes( - attributes={"version": rctx.get("version", {})}, - namespace="meta", - ) - span.set_attributes( - attributes={"variant": rctx.get("variant", {})}, + attributes={"configuration": context.parameters or {}}, namespace="meta", ) @@ -118,6 +141,7 @@ def _pre_instrument( io=self._parse(func, *args, **kwargs), ignore=self.ignore_inputs, ) + span.set_attributes( attributes={"inputs": _inputs}, namespace="data", @@ -161,6 +185,7 @@ def _post_instrument( io=self._patch(result), ignore=self.ignore_outputs, ) + span.set_attributes( attributes={"outputs": _outputs}, namespace="data", @@ -171,15 +196,12 @@ def _post_instrument( with suppress(): if hasattr(span, "parent") and span.parent is None: - tracing_context.set( - tracing_context.get() - | { - "root": { - "trace_id": 
span.get_span_context().trace_id, - "span_id": span.get_span_context().span_id, - } - } - ) + context = tracing_context.get() + context.link = { + "tree_id": span.get_span_context().trace_id, + "node_id": span.get_span_context().span_id, + } + tracing_context.set(context) def _parse( self, diff --git a/agenta-cli/agenta/sdk/managers/config.py b/agenta-cli/agenta/sdk/managers/config.py index edadbaedc0..d3ec7b97cb 100644 --- a/agenta-cli/agenta/sdk/managers/config.py +++ b/agenta-cli/agenta/sdk/managers/config.py @@ -7,7 +7,7 @@ from pydantic import BaseModel from agenta.sdk.managers.shared import SharedManager -from agenta.sdk.decorators.routing import routing_context +from agenta.sdk.context.routing import routing_context T = TypeVar("T", bound=BaseModel) @@ -20,7 +20,7 @@ class ConfigManager: @staticmethod def get_from_route( schema: Optional[Type[T]] = None, - ) -> Union[Dict[str, Any], T]: + ) -> Optional[Union[Dict[str, Any], T]]: """ Retrieves the configuration from the route context and returns a config object. 
@@ -47,125 +47,15 @@ def get_from_route( context = routing_context.get() - parameters = None - - if "config" in context and context["config"]: - parameters = context["config"] - - else: - app_id: Optional[str] = None - app_slug: Optional[str] = None - variant_id: Optional[str] = None - variant_slug: Optional[str] = None - variant_version: Optional[int] = None - environment_id: Optional[str] = None - environment_slug: Optional[str] = None - environment_version: Optional[int] = None - - if "application" in context: - app_id = context["application"].get("id") - app_slug = context["application"].get("slug") - - if "variant" in context: - variant_id = context["variant"].get("id") - variant_slug = context["variant"].get("slug") - variant_version = context["variant"].get("version") - - if "environment" in context: - environment_id = context["environment"].get("id") - environment_slug = context["environment"].get("slug") - environment_version = context["environment"].get("version") - - parameters = ConfigManager.get_from_registry( - app_id=app_id, - app_slug=app_slug, - variant_id=variant_id, - variant_slug=variant_slug, - variant_version=variant_version, - environment_id=environment_id, - environment_slug=environment_slug, - environment_version=environment_version, - ) + parameters = context.parameters - if schema: - return schema(**parameters) - - return parameters + if not parameters: + return None - @staticmethod - async def aget_from_route( - schema: Optional[Type[T]] = None, - ) -> Union[Dict[str, Any], T]: - """ - Asynchronously retrieves the configuration from the route context and returns a config object. + if not schema: + return parameters - This method checks the route context for configuration information and returns - an instance of the specified schema based on the available context data. - - Args: - schema (Type[T]): A Pydantic model class that defines the structure of the configuration. 
- - Returns: - T: An instance of the specified schema populated with the configuration data. - - Raises: - ValueError: If conflicting configuration sources are provided or if no valid - configuration source is found in the context. - - Note: - The method prioritizes the inputs in the following way: - 1. 'config' (i.e. when called explicitly from the playground) - 2. 'environment' - 3. 'variant' - Only one of these should be provided. - """ - - context = routing_context.get() - - parameters = None - - if "config" in context and context["config"]: - parameters = context["config"] - - else: - app_id: Optional[str] = None - app_slug: Optional[str] = None - variant_id: Optional[str] = None - variant_slug: Optional[str] = None - variant_version: Optional[int] = None - environment_id: Optional[str] = None - environment_slug: Optional[str] = None - environment_version: Optional[int] = None - - if "application" in context: - app_id = context["application"].get("id") - app_slug = context["application"].get("slug") - - if "variant" in context: - variant_id = context["variant"].get("id") - variant_slug = context["variant"].get("slug") - variant_version = context["variant"].get("version") - - if "environment" in context: - environment_id = context["environment"].get("id") - environment_slug = context["environment"].get("slug") - environment_version = context["environment"].get("version") - - parameters = await ConfigManager.async_get_from_registry( - app_id=app_id, - app_slug=app_slug, - variant_id=variant_id, - variant_slug=variant_slug, - variant_version=variant_version, - environment_id=environment_id, - environment_slug=environment_slug, - environment_version=environment_version, - ) - - if schema: - return schema(**parameters) - - return parameters + return schema(**parameters) @staticmethod def get_from_registry( diff --git a/agenta-cli/agenta/sdk/managers/vault.py b/agenta-cli/agenta/sdk/managers/vault.py new file mode 100644 index 0000000000..f559af19d2 --- /dev/null 
+++ b/agenta-cli/agenta/sdk/managers/vault.py @@ -0,0 +1,16 @@ +from typing import Optional, Dict, Any + +from agenta.sdk.context.routing import routing_context + + +class VaultManager: + @staticmethod + def get_from_route() -> Optional[Dict[str, Any]]: + context = routing_context.get() + + secrets = context.secrets + + if not secrets: + return None + + return secrets diff --git a/agenta-cli/agenta/sdk/middleware/auth.py b/agenta-cli/agenta/sdk/middleware/auth.py index c02e46322a..fd82198d05 100644 --- a/agenta-cli/agenta/sdk/middleware/auth.py +++ b/agenta-cli/agenta/sdk/middleware/auth.py @@ -1,90 +1,116 @@ from typing import Callable, Optional -from os import environ -from uuid import UUID + +from os import getenv from json import dumps -from traceback import format_exc import httpx from starlette.middleware.base import BaseHTTPMiddleware -from fastapi import FastAPI, Request, Response +from fastapi import FastAPI, Request +from fastapi.responses import JSONResponse -from agenta.sdk.utils.logging import log -from agenta.sdk.middleware.cache import TTLLRUCache +from agenta.sdk.middleware.cache import TTLLRUCache, CACHE_CAPACITY, CACHE_TTL +from agenta.sdk.utils.constants import TRUTHY +from agenta.sdk.utils.exceptions import display_exception -AGENTA_SDK_AUTH_CACHE_CAPACITY = environ.get( - "AGENTA_SDK_AUTH_CACHE_CAPACITY", - 512, -) +import agenta as ag -AGENTA_SDK_AUTH_CACHE_TTL = environ.get( - "AGENTA_SDK_AUTH_CACHE_TTL", - 15 * 60, # 15 minutes -) -AGENTA_SDK_AUTH_CACHE = str(environ.get("AGENTA_SDK_AUTH_CACHE", True)).lower() in ( - "true", - "1", - "t", +_SHARED_SERVICE = getenv("AGENTA_SHARED_SERVICE", "false").lower() in TRUTHY +_CACHE_ENABLED = getenv("AGENTA_MIDDLEWARE_CACHE_ENABLED", "true").lower() in TRUTHY +_UNAUTHORIZED_ALLOWED = ( + getenv("AGENTA_UNAUTHORIZED_EXECUTION_ALLOWED", "false").lower() in TRUTHY ) +_ALWAYS_ALLOW_LIST = ["/health"] -AGENTA_SDK_AUTH_CACHE = False +_cache = TTLLRUCache(capacity=CACHE_CAPACITY, ttl=CACHE_TTL) 
-AGENTA_UNAUTHORIZED_EXECUTION_ALLOWED = str( - environ.get("AGENTA_UNAUTHORIZED_EXECUTION_ALLOWED", False) -).lower() in ("true", "1", "t") +class DenyResponse(JSONResponse): + def __init__( + self, + status_code: int = 401, + detail: str = "Unauthorized", + ) -> None: + super().__init__( + status_code=status_code, + content={"detail": detail}, + ) -class Deny(Response): - def __init__(self) -> None: - super().__init__(status_code=401, content="Unauthorized") +class DenyException(Exception): + def __init__( + self, + status_code: int = 401, + content: str = "Unauthorized", + ) -> None: + super().__init__() -cache = TTLLRUCache( - capacity=AGENTA_SDK_AUTH_CACHE_CAPACITY, - ttl=AGENTA_SDK_AUTH_CACHE_TTL, -) + self.status_code = status_code + self.content = content -class AuthorizationMiddleware(BaseHTTPMiddleware): - def __init__( - self, - app: FastAPI, - host: str, - resource_id: UUID, - resource_type: str, - ): +class AuthMiddleware(BaseHTTPMiddleware): + def __init__(self, app: FastAPI): super().__init__(app) - self.host = host - self.resource_id = resource_id - self.resource_type = resource_type + self.host = ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.host + self.resource_id = ( + ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.service_id + if not _SHARED_SERVICE + else None + ) + + async def dispatch(self, request: Request, call_next: Callable): + try: + if _UNAUTHORIZED_ALLOWED or request.url.path in _ALWAYS_ALLOW_LIST: + request.state.auth = {} + + else: + credentials = await self._get_credentials(request) + + request.state.auth = {"credentials": credentials} - async def dispatch( - self, - request: Request, - call_next: Callable, - ): - if AGENTA_UNAUTHORIZED_EXECUTION_ALLOWED: return await call_next(request) - try: - authorization = ( - request.headers.get("Authorization") - or request.headers.get("authorization") - or None + except DenyException as deny: + display_exception("Auth Middleware Exception") + + return DenyResponse( + status_code=deny.status_code, + 
detail=deny.content, ) + except: # pylint: disable=bare-except + display_exception("Auth Middleware Exception") + + return DenyResponse( + status_code=500, + detail="Auth: Unexpected Error.", + ) + + async def _get_credentials(self, request: Request) -> Optional[str]: + try: + authorization = request.headers.get("authorization", None) + headers = {"Authorization": authorization} if authorization else None - cookies = {"sAccessToken": request.cookies.get("sAccessToken")} + access_token = request.cookies.get("sAccessToken", None) + + cookies = {"sAccessToken": access_token} if access_token else None + + baggage = request.state.otel.get("baggage") if request.state.otel else {} + + project_id = ( + # CLEANEST + baggage.get("project_id") + # ALTERNATIVE + or request.query_params.get("project_id") + ) - params = { - "action": "run_service", - "resource_type": self.resource_type, - "resource_id": self.resource_id, - } + params = {"action": "run_service", "resource_type": "service"} - project_id = request.query_params.get("project_id") + if self.resource_id: + params["resource_id"] = self.resource_id if project_id: params["project_id"] = project_id @@ -98,48 +124,57 @@ async def dispatch( sort_keys=True, ) - policy = None - if AGENTA_SDK_AUTH_CACHE: - policy = cache.get(_hash) - - if not policy: - async with httpx.AsyncClient() as client: - response = await client.get( - f"{self.host}/api/permissions/verify", - headers=headers, - cookies=cookies, - params=params, - ) + if _CACHE_ENABLED: + credentials = _cache.get(_hash) + + if credentials: + return credentials - if response.status_code != 200: - cache.put(_hash, {"effect": "deny"}) - return Deny() + async with httpx.AsyncClient() as client: + response = await client.get( + f"{self.host}/api/permissions/verify", + headers=headers, + cookies=cookies, + params=params, + ) - auth = response.json() + if response.status_code == 401: + raise DenyException( + status_code=401, + content="Invalid credentials", + ) + elif 
response.status_code == 403: + raise DenyException( + status_code=403, + content="Service execution not allowed.", + ) + elif response.status_code != 200: + raise DenyException( + status_code=400, + content="Auth: Unexpected Error.", + ) - if auth.get("effect") != "allow": - cache.put(_hash, {"effect": "deny"}) - return Deny() + auth = response.json() - policy = { - "effect": "allow", - "credentials": auth.get("credentials"), - } + if auth.get("effect") != "allow": + raise DenyException( + status_code=403, + content="Service execution not allowed.", + ) - cache.put(_hash, policy) + credentials = auth.get("credentials") - if not policy or policy.get("effect") == "deny": - return Deny() + _cache.put(_hash, credentials) - request.state.credentials = policy.get("credentials") + return credentials - return await call_next(request) + except DenyException as deny: + raise deny - except: # pylint: disable=bare-except - log.warning("------------------------------------------------------") - log.warning("Agenta SDK - handling auth middleware exception below:") - log.warning("------------------------------------------------------") - log.warning(format_exc().strip("\n")) - log.warning("------------------------------------------------------") + except Exception as exc: # pylint: disable=bare-except + display_exception("Auth Middleware Exception (suppressed)") - return Deny() + raise DenyException( + status_code=500, + content="Auth: Unexpected Error.", + ) from exc diff --git a/agenta-cli/agenta/sdk/middleware/cache.py b/agenta-cli/agenta/sdk/middleware/cache.py index 5445b1fafc..641f4f802d 100644 --- a/agenta-cli/agenta/sdk/middleware/cache.py +++ b/agenta-cli/agenta/sdk/middleware/cache.py @@ -1,6 +1,10 @@ +from os import getenv from time import time from collections import OrderedDict +CACHE_CAPACITY = int(getenv("AGENTA_MIDDLEWARE_CACHE_CAPACITY", "512")) +CACHE_TTL = int(getenv("AGENTA_MIDDLEWARE_CACHE_TTL", str(5 * 60))) # 5 minutes + class TTLLRUCache: def 
__init__(self, capacity: int, ttl: int): diff --git a/agenta-cli/agenta/sdk/middleware/config.py b/agenta-cli/agenta/sdk/middleware/config.py new file mode 100644 index 0000000000..8ea9eb9ffe --- /dev/null +++ b/agenta-cli/agenta/sdk/middleware/config.py @@ -0,0 +1,254 @@ +from typing import Callable, Optional, Tuple, Dict + +from os import getenv +from json import dumps + +from pydantic import BaseModel + +from starlette.middleware.base import BaseHTTPMiddleware +from fastapi import Request, FastAPI + +import httpx + +from agenta.sdk.middleware.cache import TTLLRUCache, CACHE_CAPACITY, CACHE_TTL +from agenta.sdk.utils.constants import TRUTHY +from agenta.sdk.utils.exceptions import suppress + +import agenta as ag + + +_CACHE_ENABLED = getenv("AGENTA_MIDDLEWARE_CACHE_ENABLED", "true").lower() in TRUTHY + +_cache = TTLLRUCache(capacity=CACHE_CAPACITY, ttl=CACHE_TTL) + + +class Reference(BaseModel): + id: Optional[str] = None + slug: Optional[str] = None + version: Optional[str] = None + + +class ConfigMiddleware(BaseHTTPMiddleware): + def __init__(self, app: FastAPI): + super().__init__(app) + + self.host = ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.host + self.application_id = ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.app_id + + async def dispatch( + self, + request: Request, + call_next: Callable, + ): + request.state.config = {} + + with suppress(): + parameters, references = await self._get_config(request) + + request.state.config = { + "parameters": parameters, + "references": references, + } + + return await call_next(request) + + # @atimeit + async def _get_config(self, request: Request) -> Optional[Tuple[Dict, Dict]]: + credentials = request.state.auth.get("credentials") + + headers = None + if credentials: + headers = {"Authorization": credentials} + + application_ref = await self._parse_application_ref(request) + variant_ref = await self._parse_variant_ref(request) + environment_ref = await self._parse_environment_ref(request) + + refs = {} + if application_ref: + 
refs["application_ref"] = application_ref.model_dump() + if variant_ref: + refs["variant_ref"] = variant_ref.model_dump() + if environment_ref: + refs["environment_ref"] = environment_ref.model_dump() + + if not refs: + return None, None + + _hash = dumps( + { + "headers": headers, + "refs": refs, + }, + sort_keys=True, + ) + + if _CACHE_ENABLED: + config_cache = _cache.get(_hash) + + if config_cache: + parameters = config_cache.get("parameters") + references = config_cache.get("references") + + return parameters, references + + config = None + async with httpx.AsyncClient() as client: + response = await client.post( + f"{self.host}/api/variants/configs/fetch", + headers=headers, + json=refs, + ) + + if response.status_code != 200: + return None, None + + config = response.json() + + if not config: + _cache.put(_hash, {"parameters": None, "references": None}) + + return None, None + + parameters = config.get("params") + + references = {} + + for ref_key in ["application_ref", "variant_ref", "environment_ref"]: + refs = config.get(ref_key) + ref_prefix = ref_key.split("_", maxsplit=1)[0] + + for ref_part_key in ["id", "slug", "version"]: + ref_part = refs.get(ref_part_key) + + if ref_part: + references[ref_prefix + "." 
+ ref_part_key] = ref_part + + _cache.put(_hash, {"parameters": parameters, "references": references}) + + return parameters, references + + async def _parse_application_ref( + self, + request: Request, + ) -> Optional[Reference]: + baggage = request.state.otel.get("baggage") if request.state.otel else {} + + body = {} + try: + body = await request.json() + except: # pylint: disable=bare-except + pass + + application_id = ( + # CLEANEST + baggage.get("application_id") + # ALTERNATIVE + or request.query_params.get("application_id") + # LEGACY + or request.query_params.get("app_id") + or self.application_id + ) + application_slug = ( + # CLEANEST + baggage.get("application_slug") + # ALTERNATIVE + or request.query_params.get("application_slug") + # LEGACY + or request.query_params.get("app_slug") + or body.get("app") + ) + + if not any([application_id, application_slug]): + return None + + return Reference( + id=application_id, + slug=application_slug, + ) + + async def _parse_variant_ref( + self, + request: Request, + ) -> Optional[Reference]: + baggage = request.state.otel.get("baggage") if request.state.otel else {} + + body = {} + try: + body = await request.json() + except: # pylint: disable=bare-except + pass + + variant_id = ( + # CLEANEST + baggage.get("variant_id") + # ALTERNATIVE + or request.query_params.get("variant_id") + ) + variant_slug = ( + # CLEANEST + baggage.get("variant_slug") + # ALTERNATIVE + or request.query_params.get("variant_slug") + # LEGACY + or request.query_params.get("config") + or body.get("config") + ) + variant_version = ( + # CLEANEST + baggage.get("variant_version") + # ALTERNATIVE + or request.query_params.get("variant_version") + ) + + if not any([variant_id, variant_slug, variant_version]): + return None + + return Reference( + id=variant_id, + slug=variant_slug, + version=variant_version, + ) + + async def _parse_environment_ref( + self, + request: Request, + ) -> Optional[Reference]: + baggage = 
request.state.otel.get("baggage") if request.state.otel else {} + + body = {} + try: + body = await request.json() + except: # pylint: disable=bare-except + pass + + environment_id = ( + # CLEANEST + baggage.get("environment_id") + # ALTERNATIVE + or request.query_params.get("environment_id") + ) + environment_slug = ( + # CLEANEST + baggage.get("environment_slug") + # ALTERNATIVE + or request.query_params.get("environment_slug") + # LEGACY + or request.query_params.get("environment") + or body.get("environment") + ) + environment_version = ( + # CLEANEST + baggage.get("environment_version") + # ALTERNATIVE + or request.query_params.get("environment_version") + ) + + if not any([environment_id, environment_slug, environment_version]): + return None + + return Reference( + id=environment_id, + slug=environment_slug, + version=environment_version, + ) diff --git a/agenta-cli/agenta/sdk/middleware/cors.py b/agenta-cli/agenta/sdk/middleware/cors.py new file mode 100644 index 0000000000..80f0a30fc5 --- /dev/null +++ b/agenta-cli/agenta/sdk/middleware/cors.py @@ -0,0 +1,27 @@ +from os import getenv + +from starlette.types import ASGIApp, Receive, Scope, Send +from fastapi.middleware.cors import CORSMiddleware as _CORSMiddleware + +_TRUTHY = {"true", "1", "t", "y", "yes", "on", "enable", "enabled"} +_USE_CORS = getenv("AGENTA_USE_CORS", "enable").lower() in _TRUTHY + + +class CORSMiddleware(_CORSMiddleware): + def __init__(self, app: ASGIApp): + if _USE_CORS: + super().__init__( + app=app, + allow_origins=["*"], + allow_methods=["*"], + allow_headers=["*"], + allow_credentials=True, + expose_headers=None, + max_age=None, + ) + + async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: + if _USE_CORS: + return await super().__call__(scope, receive, send) + + return await self.app(scope, receive, send) diff --git a/agenta-cli/agenta/sdk/middleware/otel.py b/agenta-cli/agenta/sdk/middleware/otel.py new file mode 100644 index 0000000000..0a6396f979 --- 
/dev/null +++ b/agenta-cli/agenta/sdk/middleware/otel.py @@ -0,0 +1,40 @@ +from typing import Callable + +from starlette.middleware.base import BaseHTTPMiddleware +from fastapi import Request, FastAPI + +from opentelemetry.baggage.propagation import W3CBaggagePropagator + +from agenta.sdk.utils.exceptions import suppress + + +class OTelMiddleware(BaseHTTPMiddleware): + def __init__(self, app: FastAPI): + super().__init__(app) + + async def dispatch(self, request: Request, call_next: Callable): + request.state.otel = {} + + with suppress(): + baggage = await self._get_baggage(request) + + request.state.otel = {"baggage": baggage} + + return await call_next(request) + + async def _get_baggage( + self, + request, + ): + _baggage = {"baggage": request.headers.get("Baggage", "")} + + context = W3CBaggagePropagator().extract(_baggage) + + baggage = {} + + if context: + for partial in context.values(): + for key, value in partial.items(): + baggage[key] = value + + return baggage diff --git a/agenta-cli/agenta/sdk/middleware/vault.py b/agenta-cli/agenta/sdk/middleware/vault.py new file mode 100644 index 0000000000..c7b6a8877f --- /dev/null +++ b/agenta-cli/agenta/sdk/middleware/vault.py @@ -0,0 +1,158 @@ +from typing import Callable, Dict, Optional + +from enum import Enum +from os import getenv +from json import dumps + +from pydantic import BaseModel + +import httpx +from starlette.middleware.base import BaseHTTPMiddleware +from fastapi import FastAPI, Request + +from agenta.sdk.middleware.cache import TTLLRUCache, CACHE_CAPACITY, CACHE_TTL +from agenta.sdk.utils.constants import TRUTHY +from agenta.sdk.utils.exceptions import suppress, display_exception + +import agenta as ag + + +# TODO: Move to backend client types +class SecretKind(str, Enum): + PROVIDER_KEY = "provider_key" + + +# TODO: Move to backend client types +class ProviderKind(str, Enum): + OPENAI = "openai" + COHERE = "cohere" + ANYSCALE = "anyscale" + DEEPINFRA = "deepinfra" + ALEPHALPHA = "alephalpha" + 
GROQ = "groq" + MISTRALAI = "mistralai" + ANTHROPIC = "anthropic" + PERPLEXITYAI = "perplexityai" + TOGETHERAI = "togetherai" + OPENROUTER = "openrouter" + GEMINI = "gemini" + + +# TODO: Move to backend client types +class ProviderKeyDTO(BaseModel): + provider: ProviderKind + key: str + + +# TODO: Move to backend client types +class SecretDTO(BaseModel): + kind: SecretKind = "provider_key" + data: ProviderKeyDTO + + +_CACHE_ENABLED = getenv("AGENTA_MIDDLEWARE_CACHE_ENABLED", "true").lower() in TRUTHY + +_cache = TTLLRUCache(capacity=CACHE_CAPACITY, ttl=CACHE_TTL) + + +class VaultMiddleware(BaseHTTPMiddleware): + def __init__(self, app: FastAPI): + super().__init__(app) + + self.host = ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.host + + async def dispatch( + self, + request: Request, + call_next: Callable, + ): + request.state.vault = {} + + with suppress(): + secrets = await self._get_secrets(request) + + request.state.vault = {"secrets": secrets} + + return await call_next(request) + + async def _get_secrets(self, request: Request) -> Optional[Dict]: + credentials = request.state.auth.get("credentials") + + headers = None + if credentials: + headers = {"Authorization": credentials} + + _hash = dumps( + { + "headers": headers, + }, + sort_keys=True, + ) + + if _CACHE_ENABLED: + secrets_cache = _cache.get(_hash) + + if secrets_cache: + secrets = secrets_cache.get("secrets") + + return secrets + + local_secrets = [] + + try: + for provider_kind in ProviderKind: + provider = provider_kind.value + key_name = f"{provider.upper()}_API_KEY" + key = getenv(key_name) + + if not key: + continue + + secret = SecretDTO( + kind=SecretKind.PROVIDER_KEY, + data=ProviderKeyDTO( + provider=provider, + key=key, + ), + ) + + local_secrets.append(secret.model_dump()) + except: # pylint: disable=bare-except + display_exception("Vault: Local Secrets Exception") + + vault_secrets = [] + + try: + async with httpx.AsyncClient() as client: + response = await client.get( + 
f"{self.host}/api/vault/v1/secrets", + headers=headers, + ) + + if response.status_code != 200: + vault_secrets = [] + + else: + vault = response.json() + + vault_secrets = vault.get("secrets") + except: # pylint: disable=bare-except + display_exception("Vault: Vault Secrets Exception") + + merged_secrets = {} + + if local_secrets: + for secret in local_secrets: + provider = secret["data"]["provider"] + merged_secrets[provider] = secret + + if vault_secrets: + for secret in vault_secrets: + provider = secret["data"]["provider"] + merged_secrets[provider] = secret + + secrets = list(merged_secrets.values()) + + _cache.put(_hash, {"secrets": secrets}) + + return secrets diff --git a/agenta-cli/agenta/sdk/tracing/exporters.py b/agenta-cli/agenta/sdk/tracing/exporters.py index 62f03a10b5..7a38201d5a 100644 --- a/agenta-cli/agenta/sdk/tracing/exporters.py +++ b/agenta-cli/agenta/sdk/tracing/exporters.py @@ -9,6 +9,11 @@ ) from agenta.sdk.utils.exceptions import suppress +from agenta.sdk.context.exporting import ( + exporting_context_manager, + exporting_context, + ExportingContext, +) class InlineTraceExporter(SpanExporter): @@ -58,8 +63,41 @@ def fetch( return trace -OTLPSpanExporter._MAX_RETRY_TIMEOUT = 2 # pylint: disable=protected-access +class OTLPExporter(OTLPSpanExporter): + _MAX_RETRY_TIMEOUT = 2 + + def __init__(self, *args, credentials: Dict[int, str] = None, **kwargs): + super().__init__(*args, **kwargs) + + self.credentials = credentials + + def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult: + credentials = None + + if self.credentials: + trace_ids = set(span.get_span_context().trace_id for span in spans) + + if len(trace_ids) == 1: + trace_id = trace_ids.pop() + + if trace_id in self.credentials: + credentials = self.credentials.pop(trace_id) + + with exporting_context_manager( + context=ExportingContext( + credentials=credentials, + ) + ): + return super().export(spans) + + def _export(self, serialized_data: bytes): + credentials = 
exporting_context.get().credentials + + if credentials: + self._session.headers.update({"Authorization": credentials}) + + return super()._export(serialized_data) + ConsoleExporter = ConsoleSpanExporter InlineExporter = InlineTraceExporter -OTLPExporter = OTLPSpanExporter diff --git a/agenta-cli/agenta/sdk/tracing/inline.py b/agenta-cli/agenta/sdk/tracing/inline.py index 6905ad5cf0..3bf55cdf82 100644 --- a/agenta-cli/agenta/sdk/tracing/inline.py +++ b/agenta-cli/agenta/sdk/tracing/inline.py @@ -101,8 +101,8 @@ class NodeDTO(BaseModel): Data = Dict[str, Any] Metrics = Dict[str, Any] Metadata = Dict[str, Any] -Tags = Dict[str, str] -Refs = Dict[str, str] +Tags = Dict[str, Any] +Refs = Dict[str, Any] class LinkDTO(BaseModel): diff --git a/agenta-cli/agenta/sdk/tracing/processors.py b/agenta-cli/agenta/sdk/tracing/processors.py index b5d04d8085..2c612220cc 100644 --- a/agenta-cli/agenta/sdk/tracing/processors.py +++ b/agenta-cli/agenta/sdk/tracing/processors.py @@ -1,5 +1,6 @@ from typing import Optional, Dict, List +from opentelemetry.baggage import get_all as get_baggage from opentelemetry.context import Context from opentelemetry.sdk.trace import Span from opentelemetry.sdk.trace.export import ( @@ -11,8 +12,7 @@ ) from agenta.sdk.utils.logging import log - -# LOAD CONTEXT, HERE ! 
+from agenta.sdk.tracing.conventions import Reference class TraceProcessor(BatchSpanProcessor): @@ -43,9 +43,17 @@ def on_start( span: Span, parent_context: Optional[Context] = None, ) -> None: + baggage = get_baggage(parent_context) + for key in self.references.keys(): span.set_attribute(f"ag.refs.{key}", self.references[key]) + for key in baggage.keys(): + if key.startswith("ag.refs."): + _key = key.replace("ag.refs.", "") + if _key in [_.value for _ in Reference.__members__.values()]: + span.set_attribute(key, baggage[key]) + if span.context.trace_id not in self._registry: self._registry[span.context.trace_id] = dict() @@ -89,7 +97,7 @@ def force_flush( ret = super().force_flush(timeout_millis) if not ret: - log.warning("Agenta SDK - skipping export due to timeout.") + log.warning("Agenta - Skipping export due to timeout.") def is_ready( self, diff --git a/agenta-cli/agenta/sdk/tracing/tracing.py b/agenta-cli/agenta/sdk/tracing/tracing.py index 809c864936..0e92bb9d19 100644 --- a/agenta-cli/agenta/sdk/tracing/tracing.py +++ b/agenta-cli/agenta/sdk/tracing/tracing.py @@ -41,6 +41,8 @@ def __init__( self.headers: Dict[str, str] = dict() # REFERENCES self.references: Dict[str, str] = dict() + # CREDENTIALS + self.credentials: Dict[int, str] = dict() # TRACER PROVIDER self.tracer_provider: Optional[TracerProvider] = None @@ -60,13 +62,16 @@ def __init__( def configure( self, api_key: Optional[str] = None, + service_id: Optional[str] = None, # DEPRECATING app_id: Optional[str] = None, ): # HEADERS (OTLP) if api_key: - self.headers["Authorization"] = api_key + self.headers["Authorization"] = f"ApiKey {api_key}" # REFERENCES + if service_id: + self.references["service.id"] = service_id if app_id: self.references["application.id"] = app_id @@ -84,31 +89,28 @@ def configure( self.tracer_provider.add_span_processor(self.inline) # TRACE PROCESSORS -- OTLP try: - log.info("--------------------------------------------") log.info( - "Agenta SDK - connecting to otlp receiver 
at: %s", + "Agenta - OTLP URL: %s", self.otlp_url, ) - log.info("--------------------------------------------") - check( - self.otlp_url, - headers=self.headers, - timeout=1, - ) + # check( + # self.otlp_url, + # headers=self.headers, + # timeout=1, + # ) _otlp = TraceProcessor( OTLPExporter( endpoint=self.otlp_url, headers=self.headers, + credentials=self.credentials, ), references=self.references, ) self.tracer_provider.add_span_processor(_otlp) - log.info("Success: traces will be exported.") - log.info("--------------------------------------------") except: # pylint: disable=bare-except - log.warning("Agenta SDK - traces will not be exported.") + log.warning("Agenta - OTLP unreachable, skipping exports.") # GLOBAL TRACER PROVIDER -- INSTRUMENTATION LIBRARIES set_tracer_provider(self.tracer_provider) diff --git a/agenta-cli/agenta/sdk/types.py b/agenta-cli/agenta/sdk/types.py index cefe92825a..cd12a99a13 100644 --- a/agenta-cli/agenta/sdk/types.py +++ b/agenta-cli/agenta/sdk/types.py @@ -6,6 +6,9 @@ from agenta.client.backend.types.agenta_node_dto import AgentaNodeDto from agenta.client.backend.types.agenta_nodes_response import AgentaNodesResponse +from typing import Annotated, List, Union, Optional, Dict, Literal, Any +from pydantic import BaseModel, Field, root_validator +from agenta.sdk.assets import supported_llm_models @dataclass @@ -13,12 +16,6 @@ class MultipleChoice: choices: Union[List[str], Dict[str, List[str]]] -class InFile: - def __init__(self, file_name: str, file_path: str): - self.file_name = file_name - self.file_path = file_path - - class LLMTokenUsage(BaseModel): completion_tokens: int prompt_tokens: int @@ -26,8 +23,9 @@ class LLMTokenUsage(BaseModel): class BaseResponse(BaseModel): - version: Optional[str] = "3.0" + version: Optional[str] = "3.1" data: Optional[Union[str, Dict[str, Any]]] = None + content_type: Optional[str] = "string" tree: Optional[AgentaNodesResponse] = None @@ -248,3 +246,297 @@ class Prompt(BaseModel): top_p: float
frequency_penalty: float presence_penalty: float + +# ----------------------------------------------------- +# New Prompt model +# ----------------------------------------------------- + + +class ToolCall(BaseModel): + id: str + type: Literal["function"] = "function" + function: Dict[str, str] + +class Message(BaseModel): + role: Literal["system", "user", "assistant", "tool", "function"] + content: Optional[str] = None + name: Optional[str] = None + tool_calls: Optional[List[ToolCall]] = None + tool_call_id: Optional[str] = None + +class ResponseFormatText(BaseModel): + type: Literal["text"] + """The type of response format being defined: `text`""" + + +class ResponseFormatJSONObject(BaseModel): + type: Literal["json_object"] + """The type of response format being defined: `json_object`""" + + +class JSONSchema(BaseModel): + name: str + """The name of the response format.""" + description: Optional[str] = None + """A description of what the response format is for.""" + schema_: Optional[Dict[str, object]] = Field(alias="schema", default=None) + """The schema for the response format, described as a JSON Schema object.""" + strict: Optional[bool] = None + """Whether to enable strict schema adherence.""" + + model_config = { + "populate_by_name": True, + "json_schema_extra": {"required": ["name", "schema"]} + } + + +class ResponseFormatJSONSchema(BaseModel): + type: Literal["json_schema"] + """The type of response format being defined: `json_schema`""" + json_schema: JSONSchema + + +ResponseFormat = Union[ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema] + +class ModelConfig(BaseModel): + """Configuration for model parameters""" + model: Annotated[str, MultipleChoice(choices=supported_llm_models)] = Field( + default="gpt-3.5-turbo", + description="ID of the model to use" + ) + temperature: Optional[float] = Field( + default=None, + ge=0.0, + le=2.0, + description="What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic" + ) + max_tokens: Optional[int] = Field( + default=None, + ge=0, + description="The maximum number of tokens that can be generated in the chat completion" + ) + top_p: Optional[float] = Field( + default=None, + ge=0.0, + le=1.0, + description="An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass" + ) + frequency_penalty: Optional[float] = Field( + default=None, + ge=-2.0, + le=2.0, + description="Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far" + ) + presence_penalty: Optional[float] = Field( + default=None, + ge=-2.0, + le=2.0, + description="Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far" + ) + response_format: Optional[ResponseFormat] = Field( + default=None, + description="An object specifying the format that the model must output" + ) + stream: Optional[bool] = Field( + default=None, + description="If set, partial message deltas will be sent" + ) + tools: Optional[List[Dict]] = Field( + default=None, + description="A list of tools the model may call. 
Currently, only functions are supported as a tool" + ) + tool_choice: Optional[Union[Literal["none", "auto"], Dict]] = Field( + default=None, + description="Controls which (if any) tool is called by the model" + ) + +class PromptTemplateError(Exception): + """Base exception for all PromptTemplate errors""" + pass + +class InputValidationError(PromptTemplateError): + """Raised when input validation fails""" + def __init__(self, message: str, missing: Optional[set] = None, extra: Optional[set] = None): + self.missing = missing + self.extra = extra + super().__init__(message) + +class TemplateFormatError(PromptTemplateError): + """Raised when template formatting fails""" + def __init__(self, message: str, original_error: Optional[Exception] = None): + self.original_error = original_error + super().__init__(message) + +class PromptTemplate(BaseModel): + """A template for generating prompts with formatting capabilities""" + messages: List[Message] = Field( + default=[ + Message(role="system", content=""), + Message(role="user", content="") + ] + ) + system_prompt: Optional[str] = None + user_prompt: Optional[str] = None + template_format: Literal["fstring", "jinja2", "curly"] = Field( + default="fstring", + description="Format type for template variables: fstring {var}, jinja2 {{ var }}, or curly {{var}}" + ) + input_keys: Optional[List[str]] = Field( + default=None, + description="Optional list of input keys for validation. 
If not provided, any inputs will be accepted" + ) + llm_config: ModelConfig = Field( + default_factory=ModelConfig, + description="Configuration for the model parameters" + ) + + model_config = { + "json_schema_extra": { + "x-parameters": { + "prompt": "true", + } + } + } + + + @root_validator(pre=True) + def init_messages(cls, values): + if "messages" not in values: + messages = [] + if "system_prompt" in values and values["system_prompt"]: + messages.append(Message(role="system", content=values["system_prompt"])) + if "user_prompt" in values and values["user_prompt"]: + messages.append(Message(role="user", content=values["user_prompt"])) + if messages: + values["messages"] = messages + return values + + def _format_with_template(self, content: str, kwargs: Dict[str, Any]) -> str: + """Internal method to format content based on template_format""" + try: + if self.template_format == "fstring": + return content.format(**kwargs) + elif self.template_format == "jinja2": + from jinja2 import Template, TemplateError + try: + return Template(content).render(**kwargs) + except TemplateError as e: + raise TemplateFormatError( + f"Jinja2 template error in content: '{content}'. 
Error: {str(e)}", + original_error=e + ) + elif self.template_format == "curly": + import re + result = content + for key, value in kwargs.items(): + result = re.sub(r'\{\{' + key + r'\}\}', str(value), result) + if re.search(r'\{\{.*?\}\}', result): + unreplaced = re.findall(r'\{\{(.*?)\}\}', result) + raise TemplateFormatError( + f"Unreplaced variables in curly template: {unreplaced}" + ) + return result + else: + raise TemplateFormatError(f"Unknown template format: {self.template_format}") + except KeyError as e: + key = str(e).strip("'") + raise TemplateFormatError( + f"Missing required variable '{key}' in template: '{content}'" + ) + except Exception as e: + raise TemplateFormatError( + f"Error formatting template '{content}': {str(e)}", + original_error=e + ) + + def format(self, **kwargs) -> 'PromptTemplate': + """ + Format the template with provided inputs. + Only validates against input_keys if they are specified. + + Raises: + InputValidationError: If input validation fails + TemplateFormatError: If template formatting fails + """ + # Validate inputs if input_keys is set + if self.input_keys is not None: + missing = set(self.input_keys) - set(kwargs.keys()) + extra = set(kwargs.keys()) - set(self.input_keys) + + error_parts = [] + if missing: + error_parts.append(f"Missing required inputs: {', '.join(sorted(missing))}") + if extra: + error_parts.append(f"Unexpected inputs: {', '.join(sorted(extra))}") + + if error_parts: + raise InputValidationError( + " | ".join(error_parts), + missing=missing if missing else None, + extra=extra if extra else None + ) + + new_messages = [] + for i, msg in enumerate(self.messages): + if msg.content: + try: + new_content = self._format_with_template(msg.content, kwargs) + except TemplateFormatError as e: + raise TemplateFormatError( + f"Error in message {i} ({msg.role}): {str(e)}", + original_error=e.original_error + ) + else: + new_content = None + + new_messages.append(Message( + role=msg.role, + content=new_content, + 
name=msg.name, + tool_calls=msg.tool_calls, + tool_call_id=msg.tool_call_id + )) + + return PromptTemplate( + messages=new_messages, + template_format=self.template_format, + llm_config=self.llm_config, + input_keys=self.input_keys + ) + + def to_openai_kwargs(self) -> dict: + """Convert the prompt template to kwargs compatible with litellm/openai""" + kwargs = { + "model": self.llm_config.model, + "messages": [msg.dict(exclude_none=True) for msg in self.messages], + } + + # Add optional parameters only if they are set + if self.llm_config.temperature is not None: + kwargs["temperature"] = self.llm_config.temperature + + if self.llm_config.top_p is not None: + kwargs["top_p"] = self.llm_config.top_p + + if self.llm_config.stream is not None: + kwargs["stream"] = self.llm_config.stream + + if self.llm_config.max_tokens is not None: + kwargs["max_tokens"] = self.llm_config.max_tokens + + if self.llm_config.frequency_penalty is not None: + kwargs["frequency_penalty"] = self.llm_config.frequency_penalty + + if self.llm_config.presence_penalty is not None: + kwargs["presence_penalty"] = self.llm_config.presence_penalty + + if self.llm_config.response_format: + kwargs["response_format"] = self.llm_config.response_format.dict(by_alias=True) + + if self.llm_config.tools: + kwargs["tools"] = self.llm_config.tools + # Only set tool_choice if tools are present + if self.llm_config.tool_choice is not None: + kwargs["tool_choice"] = self.llm_config.tool_choice + + return kwargs \ No newline at end of file diff --git a/agenta-cli/agenta/sdk/utils/constants.py b/agenta-cli/agenta/sdk/utils/constants.py new file mode 100644 index 0000000000..fc2e1ae25d --- /dev/null +++ b/agenta-cli/agenta/sdk/utils/constants.py @@ -0,0 +1 @@ +TRUTHY = {"true", "1", "t", "y", "yes", "on", "enable", "enabled"} diff --git a/agenta-cli/agenta/sdk/utils/exceptions.py b/agenta-cli/agenta/sdk/utils/exceptions.py index a451b1de78..a1d5cb3793 100644 --- a/agenta-cli/agenta/sdk/utils/exceptions.py +++ 
b/agenta-cli/agenta/sdk/utils/exceptions.py @@ -6,6 +6,17 @@ from agenta.sdk.utils.logging import log +def display_exception(message: str): + _len = len("Agenta - ") + len(message) + len(":") + _bar = "-" * _len + + log.warning(_bar) + log.warning("Agenta - %s:", message) + log.warning(_bar) + log.warning(format_exc().strip("\n")) + log.warning(_bar) + + class suppress(AbstractContextManager): # pylint: disable=invalid-name def __init__(self): pass @@ -14,15 +25,10 @@ def __enter__(self): pass def __exit__(self, exc_type, exc_value, exc_tb): - if exc_type is None: - return True - else: - log.warning("-------------------------------------------------") - log.warning("Agenta SDK - suppressing tracing exception below:") - log.warning("-------------------------------------------------") - log.warning(format_exc().strip("\n")) - log.warning("-------------------------------------------------") - return True + if exc_type is not None: + display_exception("Exception (suppressed)") + + return True def handle_exceptions(): @@ -33,12 +39,10 @@ def decorator(func): async def async_wrapper(*args, **kwargs): try: return await func(*args, **kwargs) + except Exception as e: - log.warning("------------------------------------------") - log.warning("Agenta SDK - intercepting exception below:") - log.warning("------------------------------------------") - log.warning(format_exc().strip("\n")) - log.warning("------------------------------------------") + display_exception("Exception") + raise e @wraps(func) @@ -46,11 +50,8 @@ def sync_wrapper(*args, **kwargs): try: return func(*args, **kwargs) except Exception as e: - log.warning("------------------------------------------") - log.warning("Agenta SDK - intercepting exception below:") - log.warning("------------------------------------------") - log.warning(format_exc().strip("\n")) - log.warning("------------------------------------------") + display_exception("Exception") + raise e return async_wrapper if is_coroutine_function else 
sync_wrapper diff --git a/agenta-cli/agenta/sdk/utils/globals.py b/agenta-cli/agenta/sdk/utils/globals.py index f05141e089..ceae076427 100644 --- a/agenta-cli/agenta/sdk/utils/globals.py +++ b/agenta-cli/agenta/sdk/utils/globals.py @@ -1,14 +1,10 @@ -import agenta +import agenta as ag def set_global(config=None, tracing=None): - """Allows usage of agenta.config and agenta.tracing in the user's code. + """Allows usage of agenta.config and agenta.tracing in the user's code.""" - Args: - config: _description_. Defaults to None. - tracing: _description_. Defaults to None. - """ if config is not None: - agenta.config = config + ag.config = config if tracing is not None: - agenta.tracing = tracing + ag.tracing = tracing diff --git a/agenta-cli/agenta/sdk/utils/timing.py b/agenta-cli/agenta/sdk/utils/timing.py new file mode 100644 index 0000000000..c73b5f210d --- /dev/null +++ b/agenta-cli/agenta/sdk/utils/timing.py @@ -0,0 +1,58 @@ +import time +from functools import wraps + +from agenta.sdk.utils.logging import log + + +def timeit(func): + @wraps(func) + def wrapper(*args, **kwargs): + start_time = time.time() + result = func(*args, **kwargs) + end_time = time.time() + + execution_time = end_time - start_time + + if execution_time < 1e-3: + time_value = execution_time * 1e6 + unit = "us" + elif execution_time < 1: + time_value = execution_time * 1e3 + unit = "ms" + else: + time_value = execution_time + unit = "s" + + class_name = args[0].__class__.__name__ if args else None + + log.info(f"'{class_name}.{func.__name__}' executed in {time_value:.4f} {unit}.") + return result + + return wrapper + + +def atimeit(func): + @wraps(func) + async def wrapper(*args, **kwargs): + start_time = time.time() + result = await func(*args, **kwargs) + end_time = time.time() + + execution_time = end_time - start_time + + if execution_time < 1e-3: + time_value = execution_time * 1e6 + unit = "us" + elif execution_time < 1: + time_value = execution_time * 1e3 + unit = "ms" + else: + 
time_value = execution_time + unit = "s" + + class_name = args[0].__class__.__name__ if args else None + + log.info(f"'{class_name}.{func.__name__}' executed in {time_value:.4f} {unit}.") + return result + + return wrapper diff --git a/agenta-cli/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/new_single_prompt.py b/agenta-cli/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/new_single_prompt.py index ea4ad8e45d..3ea54d1570 100644 --- a/agenta-cli/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/new_single_prompt.py +++ b/agenta-cli/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/new_single_prompt.py @@ -19,9 +19,6 @@ class MyConfig(BaseModel): ) max_tokens: int = Field(default=-1, ge=-1, le=4000) prompt_system: str = Field(default=SYSTEM_PROMPT) - multiselect: Annotated[str, ag.MultipleChoice(choices=["a", "b", "c"])] = Field( - default="a" - ) @ag.route("/llm_call", config_schema=MyConfig) diff --git a/agenta-cli/debugging/simple-app/.agentaignore b/agenta-cli/debugging/simple-app/.agentaignore new file mode 100644 index 0000000000..003430f461 --- /dev/null +++ b/agenta-cli/debugging/simple-app/.agentaignore @@ -0,0 +1,7 @@ +# Environments +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ +myenv/ diff --git a/agenta-cli/debugging/simple-app/Dockerfile b/agenta-cli/debugging/simple-app/Dockerfile new file mode 100644 index 0000000000..9eb6b06a54 --- /dev/null +++ b/agenta-cli/debugging/simple-app/Dockerfile @@ -0,0 +1,13 @@ +FROM agentaai/templates_v2:main + +WORKDIR /app + +COPY . . 
+ +RUN pip install --no-cache-dir --disable-pip-version-check -U agenta +RUN pip install --no-cache-dir --disable-pip-version-check -r requirements.txt + +EXPOSE 80 + +RUN ["chmod", "+x", "./entrypoint.sh"] +CMD ["./entrypoint.sh"] diff --git a/agenta-cli/debugging/simple-app/Dockerfile.cloud b/agenta-cli/debugging/simple-app/Dockerfile.cloud new file mode 100644 index 0000000000..633521b95c --- /dev/null +++ b/agenta-cli/debugging/simple-app/Dockerfile.cloud @@ -0,0 +1,9 @@ +FROM public.ecr.aws/h3w6n5z0/agentaai/lambda_templates_public:main + +COPY requirements.txt ${LAMBDA_TASK_ROOT} +RUN pip install --no-cache-dir --disable-pip-version-check -U agenta +RUN pip install --no-cache-dir --disable-pip-version-check -r requirements.txt +RUN pip install --no-cache-dir --disable-pip-version-check mangum +COPY . ${LAMBDA_TASK_ROOT} + +CMD [ "lambda_function.handler" ] diff --git a/agenta-cli/debugging/simple-app/_app.py b/agenta-cli/debugging/simple-app/_app.py new file mode 100644 index 0000000000..9e7337c9ac --- /dev/null +++ b/agenta-cli/debugging/simple-app/_app.py @@ -0,0 +1,78 @@ +import agenta as ag +from agenta import FloatParam, TextParam +from openai import OpenAI +from pydantic import BaseModel, Field +from typing import Annotated, List + +client = OpenAI() +import os + +default_prompt = ( + "Give me 10 names for a baby from this country {country} with gender {gender}!!!!" 
+) + +ag.init() + + +class Prompt(BaseModel): + prompt_template: str = Field(default=default_prompt) + model_config = { + "json_schema_extra": { + "x-component-type": "prompt-playground", + "x-component-props": { + "supportedModels": ["gpt-3", "gpt-4"], + "allowTemplating": True, + }, + } + } + + +class Message(BaseModel): + role: str = Field(default="user") + content: str = Field(default="") + model_config = { + "json_schema_extra": { + "x-component-type": "message", + "x-component-props": { + "supportedModels": ["gpt-3", "gpt-4"], + "allowTemplating": True, + }, + } + } + + +class BabyConfig(BaseModel): + temperature: float = Field(default=0.2) + prompt_template: str = Field(default=default_prompt) + model: Annotated[str, ag.MultipleChoice(choices=["asd", "asd2"])] = Field( + default="asd" + ) + prompt: Prompt = Field(default=Prompt()) + + +@ag.route("/", config_schema=BabyConfig) +def generate(country: str, gender: str, messages: Message) -> str: + """ + Generate a baby name based on the given country and gender. + + Args: + country (str): The country to generate the name from. + gender (str): The gender of the baby. + + Returns: + str: The generated baby name. 
+ """ + config = ag.ConfigManager.get_from_route(schema=BabyConfig) + prompt = config.prompt_template.format(country=country, gender=gender) + + chat_completion = client.chat.completions.create( + model="gpt-3.5-turbo", + messages=[{"role": "user", "content": prompt}], + temperature=config.temperature, + ) + token_usage = chat_completion.usage.dict() + return { + "message": chat_completion.choices[0].message.content, + **{"usage": token_usage}, + "cost": ag.calculate_token_usage("gpt-3.5-turbo", token_usage), + } diff --git a/agenta-cli/debugging/simple-app/agenta/__init__.py b/agenta-cli/debugging/simple-app/agenta/__init__.py new file mode 100644 index 0000000000..53c65db70f --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/__init__.py @@ -0,0 +1,73 @@ +from typing import Any, Callable, Optional + +from .sdk.utils.preinit import PreInitObject + +import agenta.client.backend.types as client_types # pylint: disable=wrong-import-order + +from .sdk.types import ( + DictInput, + MultipleChoice, + FloatParam, + InFile, + IntParam, + MultipleChoiceParam, + GroupedMultipleChoiceParam, + MessagesInput, + TextParam, + FileInputURL, + BinaryParam, + Prompt, +) + +from .sdk.utils.logging import log as logging +from .sdk.tracing import Tracing, get_tracer +from .sdk.decorators.tracing import instrument +from .sdk.tracing.conventions import Reference +from .sdk.decorators.routing import entrypoint, app, route +from .sdk.agenta_init import Config, AgentaSingleton, init as _init +from .sdk.utils.costs import calculate_token_usage +from .sdk.client import Agenta +from .sdk.litellm import litellm as callbacks +from .sdk.managers.config import ConfigManager +from .sdk.managers.variant import VariantManager +from .sdk.managers.deployment import DeploymentManager +from .sdk import assets as assets +from .sdk import tracer + +config = PreInitObject("agenta.config", Config) +DEFAULT_AGENTA_SINGLETON_INSTANCE = AgentaSingleton() + +types = client_types + +api = None +async_api 
= None + +tracing = DEFAULT_AGENTA_SINGLETON_INSTANCE.tracing # type: ignore +tracer = get_tracer(tracing) + + +def init( + host: Optional[str] = None, + api_key: Optional[str] = None, + config_fname: Optional[str] = None, + redact: Optional[Callable[..., Any]] = None, + redact_on_error: Optional[bool] = True, + # DEPRECATING + app_id: Optional[str] = None, +): + global api, async_api, tracing, tracer # pylint: disable=global-statement + + _init( + host=host, + api_key=api_key, + config_fname=config_fname, + redact=redact, + redact_on_error=redact_on_error, + app_id=app_id, + ) + + api = DEFAULT_AGENTA_SINGLETON_INSTANCE.api # type: ignore + async_api = DEFAULT_AGENTA_SINGLETON_INSTANCE.async_api # type: ignore + + tracing = DEFAULT_AGENTA_SINGLETON_INSTANCE.tracing # type: ignore + tracer = get_tracer(tracing) diff --git a/agenta-cli/debugging/simple-app/agenta/cli/evaluation_commands.py b/agenta-cli/debugging/simple-app/agenta/cli/evaluation_commands.py new file mode 100644 index 0000000000..76e00f9694 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/cli/evaluation_commands.py @@ -0,0 +1,22 @@ +import click +from agenta.client import client + + +@click.group() +def evaluation(): + """Commands for evaluations.""" + pass + + +# TODO: Remove hardcoded values +@evaluation.command(name="run") +def run_evaluation_cli(): + """Run an evaluation.""" + + try: + client.run_evaluation( + app_name="sss", + host="http://localhost", + ) + except Exception as ex: + click.echo(click.style(f"Error while running evaluation: {ex}", fg="red")) diff --git a/agenta-cli/debugging/simple-app/agenta/cli/helper.py b/agenta-cli/debugging/simple-app/agenta/cli/helper.py new file mode 100644 index 0000000000..15c945218d --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/cli/helper.py @@ -0,0 +1,202 @@ +import os +import sys +import toml +import click +import questionary +from pathlib import Path +from typing import Any, List, MutableMapping +from agenta.client.api_models 
import AppVariant + + +from typing import Any, Optional +from pathlib import Path +import toml + +from agenta.client.backend.client import AgentaApi + +BACKEND_URL_SUFFIX = os.environ.get("BACKEND_URL_SUFFIX", "api") +POSTHOG_KEY = os.environ.get( + "POSTHOG_KEY", "phc_hmVSxIjTW1REBHXgj2aw4HW9X6CXb6FzerBgP9XenC7" +) + + +def get_global_config(var_name: str) -> Optional[Any]: + """ + Get the value of a global configuration variable. + + Args: + var_name: the name of the variable to get + + Returns: + the value of the variable, or None if it doesn't exist + """ + agenta_dir = Path.home() / ".agenta" + if not agenta_dir.exists(): + return None + agenta_config_file = agenta_dir / "config.toml" + if not agenta_config_file.exists(): + return None + global_config = toml.load(agenta_config_file) + if var_name not in global_config: + return None + return global_config[var_name] + + +def set_global_config(var_name: str, var_value: Any) -> None: + """ + Set the value of a global configuration variable. + + Args: + var_name: the name of the variable to set + var_value: the value to set the variable to + """ + agenta_dir = Path.home() / ".agenta" + if not agenta_dir.exists(): + agenta_dir.mkdir(exist_ok=True) + agenta_config_file = agenta_dir / "config.toml" + if not agenta_config_file.exists(): + config = {} + with agenta_config_file.open("w") as config_file: + toml.dump(config, config_file) + global_config = toml.load(agenta_config_file) + global_config[var_name] = var_value + with open(agenta_config_file, "w") as config_file: + toml.dump(global_config, config_file) + + +def get_api_key(backend_host: str) -> str: + """ + Retrieve or request the API key for accessing the Agenta platform. + + This function first looks for an existing API key in the global config file. + If found, it prompts the user to confirm whether they'd like to use that key. + If not found, it asks the user to input a new key. + + Args: + backend_host (str): The URL of the backend host. 
+ + Returns: + str: The API key to be used for accessing the Agenta platform. + + Raises: + SystemExit: If the user cancels the input by pressing Ctrl+C. + """ + + api_key = get_global_config("api_key") + if api_key: + # API key exists in the config file, ask for confirmation + confirm_api_key = questionary.confirm( + f"API Key found: {api_key}\nDo you want to use this API Key?" + ).ask() + + if confirm_api_key: + return api_key + elif confirm_api_key is None: # User pressed Ctrl+C + sys.exit(0) + + api_key = questionary.text( + f"(You can get your API Key here: {backend_host}/settings?tab=apiKeys) " + "Please provide your API key:" + ).ask() + + if api_key: + set_global_config("api_key", api_key.strip()) + + return api_key + elif api_key is None: # User pressed Ctrl+C + sys.exit(0) + + +def init_telemetry_config() -> None: + if ( + get_global_config("telemetry_tracking_enabled") is None + or get_global_config("telemetry_api_key") is None + ): + set_global_config("telemetry_tracking_enabled", True) + set_global_config( + "telemetry_api_key", + POSTHOG_KEY, + ) + + +def update_variants_from_backend( + app_id: str, + config: MutableMapping[str, Any], + host: str, + api_key: str = None, +) -> MutableMapping[str, Any]: + """Reads the list of variants from the backend and updates the config accordingly + + Arguments: + app_id -- the app id + config -- the config loaded using toml.load + api_key -- the api key to use for authentication + + Returns: + a new config object later to be saved using toml.dump(config, config_file.open('w')) + """ + client = AgentaApi( + base_url=f"{host}/{BACKEND_URL_SUFFIX}", + api_key=api_key, + ) + + try: + variants: List[AppVariant] = client.apps.list_app_variants(app_id=app_id) + except Exception as ex: + raise ex + + config["variants"] = [variant.variant_name for variant in variants] + config["variant_ids"] = [variant.variant_id for variant in variants] + return config + + +def update_config_from_backend(config_file: Path, host: str): + 
"""Updates the config file with new information from the backend + + Arguments: + config_file -- the path to the config file + """ + assert config_file.exists(), "Config file does not exist!" + config = toml.load(config_file) + app_id = config["app_id"] + api_key = config.get("api_key", "") + if "variants" not in config: + config["variants"] = [] + if "variant_ids" not in config: + config["variant_ids"] = [] + config = update_variants_from_backend(app_id, config, host, api_key) + toml.dump(config, config_file.open("w")) + + +def display_app_variant(variant: AppVariant): + """Prints a variant nicely in the terminal""" + click.echo( + click.style("App Name: ", bold=True, fg="green") + + click.style(variant.app_name, fg="green") + ) + click.echo( + click.style("Variant Name: ", bold=True, fg="blue") + + click.style(variant.variant_name, fg="blue") + ) + click.echo(click.style("Parameters: ", bold=True, fg="cyan")) + if variant.parameters: + for param, value in variant.parameters.items(): + click.echo( + click.style(f" {param}: ", fg="cyan") + + click.style(str(value), fg="cyan") + ) + else: + click.echo(click.style(" Defaults from code", fg="cyan")) + if variant.previous_variant_name: + click.echo( + click.style("Template Variant Name: ", bold=True, fg="magenta") + + click.style(variant.previous_variant_name, fg="magenta") + ) + else: + click.echo( + click.style("Template Variant Name: ", bold=True, fg="magenta") + + click.style("None", fg="magenta") + ) + click.echo( + click.style("-" * 50, bold=True, fg="white") + ) # a line for separating each variant diff --git a/agenta-cli/debugging/simple-app/agenta/cli/main.py b/agenta-cli/debugging/simple-app/agenta/cli/main.py new file mode 100644 index 0000000000..1f17272048 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/cli/main.py @@ -0,0 +1,229 @@ +import os +import re +import shutil +import sys +from typing import Union +from pathlib import Path + +import click +import questionary +import toml + +from 
agenta.cli import helper +from agenta.cli import variant_configs +from agenta.cli import variant_commands +from agenta.cli import evaluation_commands + +from agenta.client.backend.client import AgentaApi + +BACKEND_URL_SUFFIX = os.environ.get("BACKEND_URL_SUFFIX", "api") + + +def print_version(ctx, param, value): + if not value or ctx.resilient_parsing: + return + try: + try: + from importlib.metadata import PackageNotFoundError, version + except ImportError: + from importlib_metadata import PackageNotFoundError, version + package_version = version("agenta") + except PackageNotFoundError: + package_version = "package is not installed" + click.echo(f"Agenta CLI version: {package_version}") + ctx.exit() + + +def check_latest_version() -> Union[str, None]: + import requests + + try: + response = requests.get("https://pypi.org/pypi/agenta/json", timeout=360) + response.raise_for_status() + latest_version = response.json()["info"]["version"] + return latest_version + except (requests.RequestException, KeyError): + return None + + +def notify_update(available_version: str): + import importlib.metadata + + installed_version = importlib.metadata.version("agenta") + if available_version > installed_version: + click.echo( + click.style( + f"A new release of agenta is available: {installed_version} → {available_version}", + fg="yellow", + ) + ) + click.echo( + click.style("To upgrade, run: pip install --upgrade agenta", fg="yellow") + ) + + +@click.group() +@click.option( + "--version", + "-v", + is_flag=True, + callback=print_version, + expose_value=False, + is_eager=True, +) +def cli(): + latest_version = check_latest_version() + if latest_version: + notify_update(latest_version) + + +@click.command() +@click.option("--app-name", "--app_name", default=None) +@click.option("--backend-host", "backend_host", default=None) +def init(app_name: str, backend_host: str): + """Initialize a new Agenta app with the template files.""" + + init_option = "Blank App" if backend_host != "" 
and app_name != "" else "" + + api_key = os.getenv("AGENTA_API_KEY") + + if not app_name: + while True: + app_name = questionary.text("Please enter the app name").ask() + if app_name and re.match("^[a-zA-Z0-9_-]+$", app_name): + break + else: + if app_name is None: # User pressed Ctrl+C + sys.exit(0) + else: + print( + "Invalid input. Please use only alphanumeric characters without spaces." + ) + + try: + backend_hosts = { + "https://cloud.agenta.ai": "On agenta cloud", + "http://localhost": "On my local machine", + } + where_question = backend_hosts.get(backend_host, "On a remote machine") + if not backend_host: + where_question = questionary.select( + "Where are you running agenta?", + choices=[ + "On agenta cloud", + "On my local machine", + "On a remote machine", + ], + ).ask() + + if where_question == "On my local machine": + backend_host = "http://localhost" + elif where_question == "On a remote machine": + backend_host = questionary.text( + "Please provide the IP or URL of your remote host" + ).ask() + elif where_question == "On agenta cloud": + global_backend_host = helper.get_global_config("host") + if global_backend_host: + backend_host = global_backend_host + else: + backend_host = "https://cloud.agenta.ai" + + if not api_key: + api_key = helper.get_api_key(backend_host) + + elif where_question is None: # User pressed Ctrl+C + sys.exit(0) + backend_host = ( + backend_host + if backend_host.startswith("http://") or backend_host.startswith("https://") + else "http://" + backend_host + ) + + # initialize the client with the backend url and api key + client = AgentaApi( + base_url=f"{backend_host}/{BACKEND_URL_SUFFIX}", + api_key=api_key if where_question == "On agenta cloud" else "", + ) + + # Get app_id after creating new app in the backend server + try: + app_id = client.apps.create_app(app_name=app_name).app_id + except Exception as ex: + click.echo(click.style(f"Error: {ex}", fg="red")) + sys.exit(1) + + # Set app toml configuration + config = { + 
"app_name": app_name, + "app_id": app_id, + "backend_host": backend_host, + "api_key": api_key if where_question == "On agenta cloud" else None, + } + with open("config.toml", "w") as config_file: + toml.dump(config, config_file) + + # Ask for init option + if not init_option: + init_option = questionary.select( + "How do you want to initialize your app?", + choices=["Blank App", "Start from template"], + ).ask() + + # If the user selected the second option, show a list of available templates + if init_option == "Start from template": + current_dir = Path.cwd() + template_dir = Path(__file__).parent.parent / "templates" + templates = [ + folder.name for folder in template_dir.iterdir() if folder.is_dir() + ] + template_desc = [ + toml.load((template_dir / name / "template.toml"))["short_desc"] + for name in templates + ] + + # Show the templates to the user + template = questionary.select( + "Which template do you want to use?", + choices=[ + questionary.Choice( + title=f"{template} - {template_desc}", value=template + ) + for template, template_desc in zip(templates, template_desc) + ], + ).ask() + + # Copy the template files to the current directory + chosen_template_dir = template_dir / template + for file in chosen_template_dir.glob("*"): + if file.name != "template.toml" and not file.is_dir(): + shutil.copy(file, current_dir / file.name) + elif init_option is None: # User pressed Ctrl+C + sys.exit(0) + + # Create a .gitignore file and add some default environment folder names to it + gitignore_content = ( + "# Environments \nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\nmyenv/\n" + ) + if not os.path.exists(".agentaignore"): + with open(".agentaignore", "w") as gitignore_file: + gitignore_file.write(gitignore_content) + + click.echo("App initialized successfully") + if init_option == "Start from template": + click.echo( + "Please check the README.md for further instructions to setup the template." 
+ ) + except Exception as ex: + click.echo(click.style(f"Error: {ex}", fg="red")) + sys.exit(1) + + +# Add the commands to the CLI group +cli.add_command(init) +cli.add_command(variant_configs.config) +cli.add_command(variant_commands.variant) +cli.add_command(evaluation_commands.evaluation) + +if __name__ == "__main__": + cli() diff --git a/agenta-cli/debugging/simple-app/agenta/cli/telemetry.py b/agenta-cli/debugging/simple-app/agenta/cli/telemetry.py new file mode 100644 index 0000000000..76e2c8b7e0 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/cli/telemetry.py @@ -0,0 +1,50 @@ +# Stdlib Imports +from uuid import uuid4 + +# Own Imports +from agenta.cli import helper + +# Third party Imports +from posthog import Posthog + + +# Load telemetry configuration +helper.init_telemetry_config() + + +class EventTracking(Posthog): + _instance = None + + def __new__(cls, *args, **kwargs): + if cls._instance is None: + cls._instance = super().__new__(cls) + return cls._instance + + def __init__(self, api_key: str, host: str) -> None: + super(Posthog, self).__init__(api_key, host) + + def capture_event( + self, + event_name: str, + body: dict, + ) -> None: + """ + Captures an event. + + Args: + event_name (str): The name of the event being captured. + body (dict): Contains the data associated with the event being captured. 
+ """ + + # A unique identifier for the user or entity associated with the event + distinct_id = helper.get_global_config("telemetry_distinct_id") + if not distinct_id: + distinct_id = uuid4() + helper.set_global_config("telemetry_distinct_id", str(distinct_id)) + self.capture(distinct_id, event_name, body) + + +# Initialize event tracking +event_track = EventTracking( + helper.get_global_config("telemetry_api_key"), "https://app.posthog.com" +) diff --git a/agenta-cli/debugging/simple-app/agenta/cli/variant_commands.py b/agenta-cli/debugging/simple-app/agenta/cli/variant_commands.py new file mode 100644 index 0000000000..ca144843bd --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/cli/variant_commands.py @@ -0,0 +1,526 @@ +import os +import re +import sys +from typing import List +from pathlib import Path + +from requests.exceptions import ConnectionError + +import click +import questionary +import toml + +from agenta.cli import helper +from agenta.cli.telemetry import event_track +from agenta.client.backend.client import AgentaApi +from agenta.client.api import add_variant_to_server +from agenta.client.api_models import AppVariant, Image +from agenta.docker.docker_utils import build_tar_docker_container +from agenta.client.backend.types.variant_action import VariantAction + + +BACKEND_URL_SUFFIX = os.environ.get("BACKEND_URL_SUFFIX", "api") + + +@click.group() +def variant(): + """Commands for variants""" + pass + + +def add_variant( + app_folder: str, file_name: str, host: str, overwrite: bool, config_name="default" +) -> str: + """ + Adds a variant to the backend. Sends the code as a tar to the backend, which then containerizes it and adds it to the backend store. + The app variant name to be added is + {file_name.removesuffix(".py")}.{config_name} + Args: + variant_name: the name of the variant + app_folder: the folder of the app + file_name: the name of the file to run. 
+ config_name: the name of the config to use for now it is always default + Returns: + the name of the code base and variant(useful for serve) + """ + + app_path = Path(app_folder) + config_file = app_path / "config.toml" + config = toml.load(config_file) + + app_name = config["app_name"] + app_id = config["app_id"] + api_key = config.get("api_key", "") + + config_name = "default" + base_name = file_name.removesuffix(".py") + variant_name = f"{base_name}.{config_name}" + + # check files in folder + app_file = app_path / file_name + if not app_file.exists(): + click.echo( + click.style( + f"No {file_name} exists! Please make sure you are in the right directory", + fg="red", + ) + ) + return None + + env_file = app_path / ".env" + if not env_file.exists(): + continue_without_env = questionary.confirm( + "No .env file found! Are you sure you handled the API keys needed in your application?\n Do you want to continue without it?" + ).ask() + if not continue_without_env: + click.echo("Operation cancelled.") + sys.exit(0) + + requirements_file = app_path / "requirements.txt" + if not requirements_file.exists(): + continue_without_requirements = questionary.confirm( + "No requirements.txt file found! Are you sure you do not need it in your application?\n Do you want to continue without it?" + ).ask() + if not continue_without_requirements: + click.echo("Operation cancelled.") + sys.exit(0) + + # Validate variant name + if not re.match("^[a-zA-Z0-9_]+$", base_name): + click.echo( + click.style( + "Invalid input. Please use only alphanumeric characters without spaces in the filename.", + fg="red", + ) + ) + sys.exit(0) + + # update the config file with the variant names from the backend + variant_name = f"{base_name}.{config_name}" + + client = AgentaApi( + base_url=f"{host}/{BACKEND_URL_SUFFIX}", + api_key=api_key, + ) + + if variant_name in config["variants"] and not overwrite: + if not overwrite: + overwrite = questionary.confirm( + "This variant already exists. 
Do you want to overwrite it?" + ).ask() + if not overwrite: + click.echo("Operation cancelled.") + return + + try: + click.echo( + click.style( + f"Preparing code base {base_name} into a tar file...", + fg="bright_black", + ) + ) + tar_path = build_tar_docker_container(folder=app_path, file_name=file_name) + + click.echo( + click.style( + f"Building code base {base_name} for {variant_name} into a docker image...", + fg="bright_black", + ) + ) + with tar_path.open("rb") as tar_file: + built_image: Image = client.containers.build_image( + app_id=app_id, + base_name=base_name, + tar_file=tar_file, + ) + image = Image(**built_image.dict()) + if tar_path.exists(): + tar_path.unlink() + + except Exception as ex: + click.echo(click.style(f"Error while building image: {ex}", fg="red")) + raise + try: + if overwrite: + click.echo( + click.style( + f"Updating {base_name} to server...", + fg="bright_black", + ) + ) + variant_id = config["variant_ids"][config["variants"].index(variant_name)] + client.variants.update_variant_image( + variant_id=variant_id, + docker_id=image.docker_id, + tags=image.tags, + type=image.type, + ) # this automatically restarts + else: + click.echo(click.style(f"Adding {variant_name} to server...", fg="yellow")) + response = add_variant_to_server( + app_id, + base_name, + image, + f"{host}/{BACKEND_URL_SUFFIX}", + api_key, + ) + variant_id = response["variant_id"] + config["variants"].append(variant_name) + config["variant_ids"].append(variant_id) + except Exception as ex: + if overwrite: + click.echo(click.style(f"Error while updating variant: {ex}", fg="red")) + else: + click.echo(click.style(f"Error while adding variant: {ex}", fg="red")) + raise + + agenta_dir = Path.home() / ".agenta" + global_toml_file = toml.load(agenta_dir / "config.toml") + tracking_enabled: bool = global_toml_file["telemetry_tracking_enabled"] + if overwrite: + # Track a deployment event + if tracking_enabled: + get_user_id = client.fetch_user_profile() + user_id = 
get_user_id["id"] + event_track.capture_event( + "app_deployment", + body={ + "app_id": app_id, + "deployed_by": user_id, + "environment": "CLI", + "version": "cloud" if api_key else "oss", + }, + ) + + click.echo( + click.style( + f"Variant {variant_name} for App {app_name} updated successfully 🎉", + bold=True, + fg="green", + ) + ) + else: + # Track a deployment event + if tracking_enabled: + get_user_id = client.fetch_user_profile() + user_id = get_user_id["id"] + event_track.capture_event( + "app_deployment", + body={ + "app_id": app_id, + "deployed_by": user_id, + "environment": "CLI", + "version": "cloud" if api_key else "oss", + }, + ) + + click.echo( + click.style( + f"Variant {variant_name} for App {app_name} added successfully to Agenta!", + fg="green", + ) + ) + # Last step us to save the config file + toml.dump(config, config_file.open("w")) + if overwrite: + # In the case we are overwriting, don't return anything. Otherwise the command server would attempt to start the container which would result in an error!!! + # TODO: Improve this stupid design + return None + else: + return variant_id + + +def start_variant(variant_id: str, app_folder: str, host: str): + """ + Starts a container for an existing variant + Args: + variant_name: the name of the variant + app_folder: the folder of the app + """ + app_folder = Path(app_folder) + config_file = app_folder / "config.toml" + config = toml.load(config_file) + app_id = config["app_id"] + api_key = config.get("api_key", "") + + if len(config["variants"]) == 0: + click.echo("No variants found. Please add a variant first.") + return + + if variant_id: + if variant_id not in config["variant_ids"]: + click.echo( + click.style( + f"Variant {variant_id} not found in backend. 
Maybe you removed it in the webUI?", + fg="red", + ) + ) + return + else: + variant_name = questionary.select( + "Please choose a variant", choices=config["variants"] + ).ask() + variant_id = config["variant_ids"][config["variants"].index(variant_name)] + + client = AgentaApi( + base_url=f"{host}/{BACKEND_URL_SUFFIX}", + api_key=api_key, + ) + + variant = client.variants.start_variant( + variant_id=variant_id, + action=VariantAction(action="START"), + ) + endpoint = variant.uri + click.echo("\n" + click.style("Congratulations! 🎉", bold=True, fg="green")) + click.echo( + click.style("Your app has been deployed locally as an API. 🚀", fg="cyan") + + click.style(" You can access it here: ", fg="white") + + click.style(f"{endpoint}/", bold=True, fg="yellow") + ) + + click.echo( + click.style("\nRead the API documentation. 📚", fg="cyan") + + click.style(" It's available at: ", fg="white") + + click.style(f"{endpoint}/docs", bold=True, fg="yellow") + ) + + webui_host = "http://localhost" if host == "localhost" else host + click.echo( + click.style( + "\nStart experimenting with your app in the playground. 🎮", + fg="cyan", + ) + + click.style(" Go to: ", fg="white") + + click.style(f"{webui_host}/apps/{app_id}/playground", bold=True, fg="yellow") + + "\n" + ) + + +def remove_variant(variant_name: str, app_folder: str, host: str): + """ + Removes a variant from the server + Args: + variant_name: the name of the variant + app_folder: the folder of the app + """ + config_file = Path(app_folder) / "config.toml" + config = toml.load(config_file) + app_name = config["app_name"] + api_key = config.get("api_key", "") + + if not config["variants"]: + click.echo( + click.style( + f"No variants found for app {app_name}. Make sure you have deployed at least one variant.", + fg="red", + ) + ) + return + + if variant_name: + if variant_name not in config["variants"]: + click.echo( + click.style( + f"Variant {variant_name} not found in backend. 
Maybe you already removed it in the webUI?", + fg="red", + ) + ) + return + else: + variant_name = questionary.select( + "Please choose a variant", choices=config["variants"] + ).ask() + variant_id = config["variant_ids"][config["variants"].index(variant_name)] + + client = AgentaApi( + base_url=f"{host}/{BACKEND_URL_SUFFIX}", + api_key=api_key, + ) + + try: + client.variants.remove_variant(variant_id=variant_id) + except Exception as ex: + click.echo( + click.style( + f"Error while removing variant {variant_name} for App {app_name} from the backend", + fg="red", + ) + ) + click.echo(click.style(f"Error message: {ex}", fg="red")) + return + + click.echo( + click.style( + f"Variant {variant_name} for App {app_name} removed successfully from Agenta!", + fg="green", + ) + ) + + +def list_variants(app_folder: str, host: str): + """List available variants for an app and print them to the console + + Arguments: + app_folder -- _description_ + """ + config_file = Path(app_folder) / "config.toml" + config = toml.load(config_file) + app_name = config["app_name"] + app_id = config["app_id"] + api_key = config.get("api_key", "") + variants = [] + + client = AgentaApi( + base_url=f"{host}/{BACKEND_URL_SUFFIX}", + api_key=api_key, + ) + + try: + variants: List[AppVariant] = client.apps.list_app_variants(app_id=app_id) + except Exception as ex: + raise ex + + if variants: + for variant in variants: + helper.display_app_variant(variant) + else: + click.echo(click.style(f"No variants found for app {app_name}", fg="red")) + + +def config_check(app_folder: str): + """Check the config file and update it from the backend + + Arguments: + app_folder -- the app folder + """ + + click.echo(click.style("\nChecking and updating config file...", fg="bright_black")) + app_folder = Path(app_folder) + config_file = app_folder / "config.toml" + if not config_file.exists(): + click.echo( + click.style( + f"Config file not found in {app_folder}. 
Make sure you are in the right folder and that you have run agenta init first.", + fg="red", + ) + ) + return + host = get_host(app_folder) # TODO: Refactor the whole config thing + helper.update_config_from_backend(config_file, host=host) + + +def get_host(app_folder: str) -> str: + """Fetches the host from the config""" + app_folder = Path(app_folder) + config_file = app_folder / "config.toml" + config = toml.load(config_file) + if "backend_host" not in config: + host = "http://localhost" + else: + host = config["backend_host"] + return host + + +@variant.command(name="remove") +@click.option("--app_folder", default=".") +@click.option("--variant_name", default="") +def remove_variant_cli(variant_name: str, app_folder: str): + """Remove an existing variant.""" + + try: + config_check(app_folder) + remove_variant( + variant_name=variant_name, + app_folder=app_folder, + host=get_host(app_folder), + ) + except Exception as ex: + click.echo(click.style(f"Error while removing variant: {ex}", fg="red")) + + +@variant.command( + name="serve", + context_settings=dict( + ignore_unknown_options=True, + allow_extra_args=True, + ), +) +@click.option("--app_folder", default=".") +@click.option("--file_name", default=None, help="The name of the file to run") +@click.option( + "--overwrite", + is_flag=True, + help="Overwrite the existing variant if it exists", +) +@click.pass_context +def serve_cli(ctx, app_folder: str, file_name: str, overwrite: bool): + """Adds a variant to the web UI and serves the API locally.""" + + if not file_name: + if ctx.args: + file_name = ctx.args[0] + else: + error_msg = "To serve variant, kindly provide the filename and run:\n" + error_msg += ">>> agenta variant serve --file_name .py\n" + error_msg += "or\n" + error_msg += ">>> agenta variant serve .py" + click.echo(click.style(f"{error_msg}", fg="red")) + sys.exit(1) + + try: + config_check(app_folder) + except Exception as e: + click.echo(click.style("Failed during configuration check.", 
fg="red")) + click.echo(click.style(f"Error message: {str(e)}", fg="red")) + sys.exit(1) + + try: + host = get_host(app_folder) + except Exception as e: + click.echo(click.style("Failed to retrieve the host.", fg="red")) + click.echo(click.style(f"Error message: {str(e)}", fg="red")) + sys.exit(1) + + try: + api_key = helper.get_global_config("api_key") + except Exception as e: + click.echo(click.style("Failed to retrieve the api key.", fg="red")) + click.echo(click.style(f"Error message: {str(e)}", fg="red")) + sys.exit(1) + + try: + variant_id = add_variant( + app_folder=app_folder, file_name=file_name, host=host, overwrite=overwrite + ) + except Exception as e: + click.echo(click.style("Failed to add variant.", fg="red")) + click.echo(click.style(f"Error message: {str(e)}", fg="red")) + sys.exit(1) + + if variant_id: + try: + start_variant(variant_id=variant_id, app_folder=app_folder, host=host) + except ConnectionError: + error_msg = "Failed to connect to Agenta backend. Here's how you can solve the issue:\n" + error_msg += "- First, please ensure that the backend service is running and accessible.\n" + error_msg += ( + "- Second, try restarting the containers (if using Docker Compose)." 
+ ) + click.echo(click.style(f"{error_msg}", fg="red")) + sys.exit(1) + except Exception as e: + click.echo(click.style("Failed to start container with LLM app.", fg="red")) + click.echo(click.style(f"Error message: {str(e)}", fg="red")) + sys.exit(1) + + +@variant.command(name="list") +@click.option("--app_folder", default=".") +def list_variants_cli(app_folder: str): + """List the variants in the backend""" + try: + config_check(app_folder) + list_variants(app_folder=app_folder, host=get_host(app_folder)) + except Exception as ex: + click.echo(click.style(f"Error while listing variants: {ex}", fg="red")) diff --git a/agenta-cli/debugging/simple-app/agenta/cli/variant_configs.py b/agenta-cli/debugging/simple-app/agenta/cli/variant_configs.py new file mode 100644 index 0000000000..3f43bbc104 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/cli/variant_configs.py @@ -0,0 +1,49 @@ +import click +from agenta.cli import helper + + +@click.group() +def config(): + """Commands for variants configurations""" + pass + + +def update_backend_host(backend_host: str): + """Check the config file and update the backend URL + + Arguments: + app_folder -- the app folder + backend_host -- the backend host + """ + + click.echo( + click.style("\nChecking and updating global backend host...", fg="bright_black") + ) + helper.set_global_config("host", backend_host) + + +@config.command( + name="set-host", + context_settings=dict( + ignore_unknown_options=True, + allow_extra_args=True, + ), +) +@click.option( + "--backend_host", default=None, help="The URL of the backend host to use." +) +@click.pass_context +def set_config_url(ctx, backend_host: str): + """Set the backend URL in the app configuration""" + + try: + if not backend_host: + if ctx.args: + backend_host = ctx.args[0] + else: + click.echo(click.style("Backend host URL not specified", fg="red")) + + update_backend_host(backend_host) + click.echo(click.style("Backend host updated successfully! 
🎉\n")) + except Exception as ex: + click.echo(click.style(f"Error updating backend host: {ex}", fg="red")) diff --git a/agenta-cli/debugging/simple-app/agenta/client/Readme.md b/agenta-cli/debugging/simple-app/agenta/client/Readme.md new file mode 100644 index 0000000000..39c8adc31f --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/Readme.md @@ -0,0 +1,104 @@ +Client code to communicate with the backend. + +Currently the models are manually copied from the backend code. This needs to change. + +# Generate Backend + +To generate the client code using Fern, follow the steps below. + +1. Open a Terminal and navigate to the folder where this Readme.md file is. For example; +```bash +cd agenta/agenta-cli/agenta/client +``` + +2. Next ensure you have installed Fern by executing the command; +```bash +npm install -g fern-api +``` + +3. Execute this command to initialize Fern to import and use the OpenAPI spec; + +> To use an OpenAPI spec, you can pass in the filepath or URL. +> We'll need to log in to use fern. +> We'll be using a url to the openapi.json for [Agenta Cloud](https://cloud.agenta.ai). +> Alternatively, for `cloud-dev` we could use [Cloud Local](http://localhost). + +```bash +fern init --openapi https://cloud.agenta.ai/api/openapi.json +# fern init --openapi http://localhost/api/openapi.json +``` + +4. Add the Fern Python SDK; +```bash +fern add fern-python-sdk +``` + +5. Go to the generators.yml, which would look like this; +```yaml +default-group: local +groups: + local: + generators: + - name: fernapi/fern-typescript-node-sdk + version: 0.9.5 + output: + location: local-file-system + path: ../sdks/typescript + - name: fernapi/fern-python-sdk + version: 0.6.0 +``` + +6. Remove `fernapi/fern-typescript-node-sdk`; +```yaml +default-group: local +groups: + local: + generators: + - name: fernapi/fern-python-sdk + version: 3.10.6 +``` + +7. 
Update `fernapi/fern-python-sdk`, which would look like this; +```yaml +default-group: local +groups: + local: + generators: + - name: fernapi/fern-python-sdk + version: 3.10.6 + output: + location: local-file-system + path: ../backend +``` +image + + +8. Go to the fern.config.json file and change the value of "organization" to `agenta` +image + + +9. Generate the client code +```bash + fern generate +``` + +10. Go to `./backend/containers/client.py`, search for the `build_image` function in the AgentaApi class and update `timeout_in_seconds` to `600` in `request_options'. It should now look like this; +```python +_response = self._client_wrapper.httpx_client.request( + "containers/build_image", + method="POST", + params={ + "app_id": app_id, + "base_name": base_name, + }, + data={}, + files={ + "tar_file": tar_file, + }, + request_options={**request_options, "timeout_in_seconds": 600}, + omit=OMIT, +) +``` +image + +11. Delete the `./fern` folder. diff --git a/agenta-cli/debugging/simple-app/agenta/client/__init__.py b/agenta-cli/debugging/simple-app/agenta/client/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/agenta-cli/debugging/simple-app/agenta/client/api.py b/agenta-cli/debugging/simple-app/agenta/client/api.py new file mode 100644 index 0000000000..1bb5880bed --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/api.py @@ -0,0 +1,74 @@ +import os +import toml +import time +import click +from typing import Dict +from pathlib import Path +from agenta.client.backend import client +from agenta.client.api_models import Image +from requests.exceptions import RequestException +from agenta.client.backend.client import AgentaApi +from agenta.client.exceptions import APIRequestError + + +def add_variant_to_server( + app_id: str, + base_name: str, + image: Image, + backend_url: str, + api_key: str, + retries=10, + backoff_factor=1, +) -> Dict: + """ + Adds a variant to the server with a retry mechanism and a single-line loading 
state. + + Args: + app_id (str): The ID of the app to add the variant to. + base_name (str): The base name for the variant. + image (Image): The image to use for the variant. + retries (int): Number of times to retry the request. + backoff_factor (float): Factor to determine the delay between retries (exponential backoff). + + Returns: + dict: The JSON response from the server. + + Raises: + APIRequestError: If the request to the server fails after retrying. + """ + + click.echo( + click.style("Waiting for the variant to be ready", fg="yellow"), nl=False + ) + + client = AgentaApi( + base_url=backend_url, + api_key=api_key, + ) + for attempt in range(retries): + try: + response = client.apps.add_variant_from_image( + app_id=app_id, + variant_name=f"{base_name.lower()}.default", + base_name=base_name, + config_name="default", + docker_id=image.docker_id, + tags=image.tags, + ) + click.echo(click.style("\nVariant added successfully!", fg="green")) + return response + except RequestException as e: + if attempt < retries - 1: + click.echo(click.style(".", fg="yellow"), nl=False) + time.sleep(backoff_factor * (2**attempt)) + else: + raise APIRequestError( + click.style( + f"\nRequest to app_variant endpoint failed with status code {response.status_code} and error message: {e}.", + fg="red", + ) + ) + except Exception as e: + raise APIRequestError( + click.style(f"\nAn unexpected error occurred: {e}", fg="red") + ) diff --git a/agenta-cli/debugging/simple-app/agenta/client/api_models.py b/agenta-cli/debugging/simple-app/agenta/client/api_models.py new file mode 100644 index 0000000000..6bd5233566 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/api_models.py @@ -0,0 +1,34 @@ +from pydantic import BaseModel +from typing import List, Optional, Dict, Any + + +class AppVariant(BaseModel): + app_id: str + app_name: str + variant_name: str + variant_id: str + parameters: Optional[Dict[str, Any]] + previous_variant_name: Optional[str] + base_name: Optional[str] 
+ config_name: Optional[str] + + +class Variant(BaseModel): + variant_id: str + + +class Image(BaseModel): + type: Optional[str] + docker_id: str + tags: str + + +class URI(BaseModel): + uri: str + + +class VariantConfigPayload(BaseModel): + base_id: str + config_name: str + parameters: Dict[str, Any] + overwrite: bool diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/__init__.py b/agenta-cli/debugging/simple-app/agenta/client/backend/__init__.py new file mode 100644 index 0000000000..8907b11d29 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/__init__.py @@ -0,0 +1,257 @@ +# This file was auto-generated by Fern from our API Definition. + +from .types import ( + AgentaNodeDto, + AgentaNodeDtoNodesValue, + AgentaNodesResponse, + AgentaRootDto, + AgentaRootsResponse, + AgentaTreeDto, + AgentaTreesResponse, + AggregatedResult, + AggregatedResultEvaluatorConfig, + App, + AppVariantResponse, + AppVariantRevision, + BaseOutput, + BodyImportTestset, + CollectStatusResponse, + ConfigDb, + ConfigDto, + ConfigResponseModel, + CorrectAnswer, + CreateAppOutput, + CreateSpan, + CreateTraceResponse, + DockerEnvVars, + EnvironmentOutput, + EnvironmentOutputExtended, + EnvironmentRevision, + Error, + Evaluation, + EvaluationScenario, + EvaluationScenarioInput, + EvaluationScenarioOutput, + EvaluationScenarioResult, + EvaluationScenarioScoreUpdate, + EvaluationStatusEnum, + EvaluationType, + Evaluator, + EvaluatorConfig, + EvaluatorMappingOutputInterface, + EvaluatorOutputInterface, + ExceptionDto, + GetConfigResponse, + HttpValidationError, + HumanEvaluation, + HumanEvaluationScenario, + HumanEvaluationScenarioInput, + HumanEvaluationScenarioOutput, + HumanEvaluationScenarioUpdate, + HumanEvaluationUpdate, + Image, + InviteRequest, + LifecycleDto, + LinkDto, + ListApiKeysResponse, + LlmRunRateLimit, + LlmTokens, + LmProvidersEnum, + NewHumanEvaluation, + NewTestset, + NodeDto, + NodeType, + OTelContextDto, + OTelEventDto, + OTelExtraDto, + 
OTelLinkDto, + OTelSpanDto, + OTelSpanKind, + OTelSpansResponse, + OTelStatusCode, + Organization, + OrganizationOutput, + Outputs, + ParentDto, + Permission, + ReferenceDto, + ReferenceRequestModel, + Result, + RootDto, + Score, + SimpleEvaluationOutput, + Span, + SpanDetail, + SpanDto, + SpanDtoNodesValue, + SpanStatusCode, + SpanVariant, + StatusCode, + StatusDto, + Template, + TemplateImageInfo, + TestSetOutputResponse, + TestSetSimpleResponse, + TimeDto, + TraceDetail, + TreeDto, + TreeType, + UpdateAppOutput, + Uri, + ValidationError, + ValidationErrorLocItem, + VariantAction, + VariantActionEnum, + WithPagination, + WorkspaceMemberResponse, + WorkspacePermission, + WorkspaceResponse, + WorkspaceRole, + WorkspaceRoleResponse, +) +from .errors import UnprocessableEntityError +from . import ( + apps, + bases, + configs, + containers, + environments, + evaluations, + evaluators, + observability, + observability_v_1, + testsets, + variants, +) +from .client import AgentaApi, AsyncAgentaApi +from .containers import ContainerTemplatesResponse +from .observability_v_1 import Format, QueryTracesResponse +from .variants import AddVariantFromBaseAndConfigResponse + +__all__ = [ + "AddVariantFromBaseAndConfigResponse", + "AgentaApi", + "AgentaNodeDto", + "AgentaNodeDtoNodesValue", + "AgentaNodesResponse", + "AgentaRootDto", + "AgentaRootsResponse", + "AgentaTreeDto", + "AgentaTreesResponse", + "AggregatedResult", + "AggregatedResultEvaluatorConfig", + "App", + "AppVariantResponse", + "AppVariantRevision", + "AsyncAgentaApi", + "BaseOutput", + "BodyImportTestset", + "CollectStatusResponse", + "ConfigDb", + "ConfigDto", + "ConfigResponseModel", + "ContainerTemplatesResponse", + "CorrectAnswer", + "CreateAppOutput", + "CreateSpan", + "CreateTraceResponse", + "DockerEnvVars", + "EnvironmentOutput", + "EnvironmentOutputExtended", + "EnvironmentRevision", + "Error", + "Evaluation", + "EvaluationScenario", + "EvaluationScenarioInput", + "EvaluationScenarioOutput", + 
"EvaluationScenarioResult", + "EvaluationScenarioScoreUpdate", + "EvaluationStatusEnum", + "EvaluationType", + "Evaluator", + "EvaluatorConfig", + "EvaluatorMappingOutputInterface", + "EvaluatorOutputInterface", + "ExceptionDto", + "Format", + "GetConfigResponse", + "HttpValidationError", + "HumanEvaluation", + "HumanEvaluationScenario", + "HumanEvaluationScenarioInput", + "HumanEvaluationScenarioOutput", + "HumanEvaluationScenarioUpdate", + "HumanEvaluationUpdate", + "Image", + "InviteRequest", + "LifecycleDto", + "LinkDto", + "ListApiKeysResponse", + "LlmRunRateLimit", + "LlmTokens", + "LmProvidersEnum", + "NewHumanEvaluation", + "NewTestset", + "NodeDto", + "NodeType", + "OTelContextDto", + "OTelEventDto", + "OTelExtraDto", + "OTelLinkDto", + "OTelSpanDto", + "OTelSpanKind", + "OTelSpansResponse", + "OTelStatusCode", + "Organization", + "OrganizationOutput", + "Outputs", + "ParentDto", + "Permission", + "QueryTracesResponse", + "ReferenceDto", + "ReferenceRequestModel", + "Result", + "RootDto", + "Score", + "SimpleEvaluationOutput", + "Span", + "SpanDetail", + "SpanDto", + "SpanDtoNodesValue", + "SpanStatusCode", + "SpanVariant", + "StatusCode", + "StatusDto", + "Template", + "TemplateImageInfo", + "TestSetOutputResponse", + "TestSetSimpleResponse", + "TimeDto", + "TraceDetail", + "TreeDto", + "TreeType", + "UnprocessableEntityError", + "UpdateAppOutput", + "Uri", + "ValidationError", + "ValidationErrorLocItem", + "VariantAction", + "VariantActionEnum", + "WithPagination", + "WorkspaceMemberResponse", + "WorkspacePermission", + "WorkspaceResponse", + "WorkspaceRole", + "WorkspaceRoleResponse", + "apps", + "bases", + "configs", + "containers", + "environments", + "evaluations", + "evaluators", + "observability", + "observability_v_1", + "testsets", + "variants", +] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/apps/__init__.py b/agenta-cli/debugging/simple-app/agenta/client/backend/apps/__init__.py new file mode 100644 index 
0000000000..67a41b2742 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/apps/__init__.py @@ -0,0 +1 @@ +# This file was auto-generated by Fern from our API Definition. diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/apps/client.py b/agenta-cli/debugging/simple-app/agenta/client/backend/apps/client.py new file mode 100644 index 0000000000..b709a8538e --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/apps/client.py @@ -0,0 +1,1631 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ..core.client_wrapper import SyncClientWrapper +from ..core.request_options import RequestOptions +from ..types.app_variant_response import AppVariantResponse +from ..core.jsonable_encoder import jsonable_encoder +from ..core.pydantic_utilities import parse_obj_as +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..types.app import App +from ..types.create_app_output import CreateAppOutput +from ..types.update_app_output import UpdateAppOutput +from ..types.environment_output import EnvironmentOutput +from ..types.environment_output_extended import EnvironmentOutputExtended +from ..core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class AppsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def list_app_variants( + self, app_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[AppVariantResponse]: + """ + Retrieve a list of app variants for a given app ID. + + Args: + app_id (str): The ID of the app to retrieve variants for. + stoken_session (SessionContainer, optional): The session container to verify the user's session. 
Defaults to Depends(verify_session()). + + Returns: + List[AppVariantResponse]: A list of app variants for the given app ID. + + Parameters + ---------- + app_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[AppVariantResponse] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.apps.list_app_variants( + app_id="app_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"apps/{jsonable_encoder(app_id)}/variants", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[AppVariantResponse], + parse_obj_as( + type_=typing.List[AppVariantResponse], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_variant_by_env( + self, + *, + app_id: str, + environment: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> AppVariantResponse: + """ + Retrieve the app variant based on the provided app_id and environment. + + Args: + app_id (str): The ID of the app to retrieve the variant for. + environment (str): The environment of the app variant to retrieve. + stoken_session (SessionContainer, optional): The session token container. Defaults to Depends(verify_session()). 
+ + Raises: + HTTPException: If the app variant is not found (status_code=500), or if a ValueError is raised (status_code=400), or if any other exception is raised (status_code=500). + + Returns: + AppVariantResponse: The retrieved app variant. + + Parameters + ---------- + app_id : str + + environment : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AppVariantResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.apps.get_variant_by_env( + app_id="app_id", + environment="environment", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "apps/get_variant_by_env", + method="GET", + params={ + "app_id": app_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AppVariantResponse, + parse_obj_as( + type_=AppVariantResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def list_apps( + self, + *, + app_name: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[App]: + """ + Retrieve a list of apps filtered by app_name. + + Args: + app_name (Optional[str]): The name of the app to filter by. + stoken_session (SessionContainer): The session container. + + Returns: + List[App]: A list of apps filtered by app_name. 
+ + Raises: + HTTPException: If there was an error retrieving the list of apps. + + Parameters + ---------- + app_name : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[App] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.apps.list_apps() + """ + _response = self._client_wrapper.httpx_client.request( + "apps", + method="GET", + params={"app_name": app_name}, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[App], + parse_obj_as( + type_=typing.List[App], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create_app( + self, + *, + app_name: str, + project_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> CreateAppOutput: + """ + Create a new app for a user. + + Args: + payload (CreateApp): The payload containing the app name. + stoken_session (SessionContainer): The session container containing the user's session token. + + Returns: + CreateAppOutput: The output containing the newly created app's ID and name. + + Raises: + HTTPException: If there is an error creating the app or the user does not have permission to access the app. + + Parameters + ---------- + app_name : str + + project_id : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CreateAppOutput + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.apps.create_app( + app_name="app_name", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "apps", + method="POST", + json={ + "app_name": app_name, + "project_id": project_id, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CreateAppOutput, + parse_obj_as( + type_=CreateAppOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def remove_app( + self, app_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: + """ + Remove app, all its variant, containers and images + + Arguments: + app -- App to remove + + Parameters + ---------- + app_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.apps.remove_app( + app_id="app_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"apps/{jsonable_encoder(app_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_app( + self, + app_id: str, + *, + app_name: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> UpdateAppOutput: + """ + Update an app for a user. + + Args: + app_id (str): The ID of the app. + payload (UpdateApp): The payload containing the app name. + stoken_session (SessionContainer): The session container containing the user's session token. + + Returns: + UpdateAppOuput: The output containing the newly created app's ID and name. + + Raises: + HTTPException: If there is an error creating the app or the user does not have permission to access the app. + + Parameters + ---------- + app_id : str + + app_name : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + UpdateAppOutput + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.apps.update_app( + app_id="app_id", + app_name="app_name", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"apps/{jsonable_encoder(app_id)}", + method="PATCH", + json={ + "app_name": app_name, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + UpdateAppOutput, + parse_obj_as( + type_=UpdateAppOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def add_variant_from_image( + self, + app_id: str, + *, + variant_name: str, + docker_id: str, + tags: str, + base_name: typing.Optional[str] = OMIT, + config_name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Add a new variant to an app based on a Docker image. + + Args: + app_id (str): The ID of the app to add the variant to. + payload (AddVariantFromImagePayload): The payload containing information about the variant to add. + stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()). + + Raises: + HTTPException: If the feature flag is set to "demo" or if the image does not have a tag starting with the registry name (agenta-server) or if the image is not found or if the user does not have access to the app. 
+ + Returns: + dict: The newly added variant. + + Parameters + ---------- + app_id : str + + variant_name : str + + docker_id : str + + tags : str + + base_name : typing.Optional[str] + + config_name : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.apps.add_variant_from_image( + app_id="app_id", + variant_name="variant_name", + docker_id="docker_id", + tags="tags", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"apps/{jsonable_encoder(app_id)}/variant/from-image", + method="POST", + json={ + "variant_name": variant_name, + "docker_id": docker_id, + "tags": tags, + "base_name": base_name, + "config_name": config_name, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create_app_and_variant_from_template( + self, + *, + app_name: str, + template_id: str, + env_vars: typing.Dict[str, str], + project_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AppVariantResponse: + """ + Create an app and variant from a template. 
+ + Args: + payload (CreateAppVariant): The payload containing the app and variant information. + stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()). + + Raises: + HTTPException: If the user has reached the app limit or if an app with the same name already exists. + + Returns: + AppVariantResponse: The output of the created app variant. + + Parameters + ---------- + app_name : str + + template_id : str + + env_vars : typing.Dict[str, str] + + project_id : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AppVariantResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.apps.create_app_and_variant_from_template( + app_name="app_name", + template_id="template_id", + env_vars={"key": "value"}, + ) + """ + _response = self._client_wrapper.httpx_client.request( + "apps/app_and_variant_from_template", + method="POST", + json={ + "app_name": app_name, + "template_id": template_id, + "project_id": project_id, + "env_vars": env_vars, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AppVariantResponse, + parse_obj_as( + type_=AppVariantResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def list_environments( + self, app_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> 
typing.List[EnvironmentOutput]: + """ + Retrieve a list of environments for a given app ID. + + Args: + app_id (str): The ID of the app to retrieve environments for. + stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()). + + Returns: + List[EnvironmentOutput]: A list of environment objects. + + Parameters + ---------- + app_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[EnvironmentOutput] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.apps.list_environments( + app_id="app_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"apps/{jsonable_encoder(app_id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[EnvironmentOutput], + parse_obj_as( + type_=typing.List[EnvironmentOutput], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def environment_revisions( + self, + app_id: str, + environment_name: typing.Optional[typing.Any], + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> EnvironmentOutputExtended: + """ + Parameters + ---------- + app_id : str + + environment_name : typing.Optional[typing.Any] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + EnvironmentOutputExtended + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.apps.environment_revisions( + app_id="string", + environment_name={"key": "value"}, + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"apps/{jsonable_encoder(app_id)}/revisions/{jsonable_encoder(environment_name)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EnvironmentOutputExtended, + parse_obj_as( + type_=EnvironmentOutputExtended, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncAppsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def list_app_variants( + self, app_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[AppVariantResponse]: + """ + Retrieve a list of app variants for a given app ID. + + Args: + app_id (str): The ID of the app to retrieve variants for. + stoken_session (SessionContainer, optional): The session container to verify the user's session. Defaults to Depends(verify_session()). + + Returns: + List[AppVariantResponse]: A list of app variants for the given app ID. + + Parameters + ---------- + app_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[AppVariantResponse] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.apps.list_app_variants( + app_id="app_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"apps/{jsonable_encoder(app_id)}/variants", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[AppVariantResponse], + parse_obj_as( + type_=typing.List[AppVariantResponse], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_variant_by_env( + self, + *, + app_id: str, + environment: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> AppVariantResponse: + """ + Retrieve the app variant based on the provided app_id and environment. + + Args: + app_id (str): The ID of the app to retrieve the variant for. + environment (str): The environment of the app variant to retrieve. + stoken_session (SessionContainer, optional): The session token container. Defaults to Depends(verify_session()). + + Raises: + HTTPException: If the app variant is not found (status_code=500), or if a ValueError is raised (status_code=400), or if any other exception is raised (status_code=500). + + Returns: + AppVariantResponse: The retrieved app variant. 
+ + Parameters + ---------- + app_id : str + + environment : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AppVariantResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.apps.get_variant_by_env( + app_id="app_id", + environment="environment", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "apps/get_variant_by_env", + method="GET", + params={ + "app_id": app_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AppVariantResponse, + parse_obj_as( + type_=AppVariantResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def list_apps( + self, + *, + app_name: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[App]: + """ + Retrieve a list of apps filtered by app_name. + + Args: + app_name (Optional[str]): The name of the app to filter by. + stoken_session (SessionContainer): The session container. + + Returns: + List[App]: A list of apps filtered by app_name. + + Raises: + HTTPException: If there was an error retrieving the list of apps. 
+ + Parameters + ---------- + app_name : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[App] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.apps.list_apps() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "apps", + method="GET", + params={"app_name": app_name}, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[App], + parse_obj_as( + type_=typing.List[App], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def create_app( + self, + *, + app_name: str, + project_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> CreateAppOutput: + """ + Create a new app for a user. + + Args: + payload (CreateApp): The payload containing the app name. + stoken_session (SessionContainer): The session container containing the user's session token. + + Returns: + CreateAppOutput: The output containing the newly created app's ID and name. + + Raises: + HTTPException: If there is an error creating the app or the user does not have permission to access the app. 
+ + Parameters + ---------- + app_name : str + + project_id : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + CreateAppOutput + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.apps.create_app( + app_name="app_name", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "apps", + method="POST", + json={ + "app_name": app_name, + "project_id": project_id, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CreateAppOutput, + parse_obj_as( + type_=CreateAppOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def remove_app( + self, app_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: + """ + Remove app, all its variant, containers and images + + Arguments: + app -- App to remove + + Parameters + ---------- + app_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.apps.remove_app( + app_id="app_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"apps/{jsonable_encoder(app_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_app( + self, + app_id: str, + *, + app_name: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> UpdateAppOutput: + """ + Update an app for a user. + + Args: + app_id (str): The ID of the app. + payload (UpdateApp): The payload containing the app name. + stoken_session (SessionContainer): The session container containing the user's session token. + + Returns: + UpdateAppOutput: The output containing the updated app's ID and name. + + Raises: + HTTPException: If there is an error updating the app or the user does not have permission to access the app. + + Parameters + ---------- + app_id : str + + app_name : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
+ + Returns + ------- + UpdateAppOutput + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.apps.update_app( + app_id="app_id", + app_name="app_name", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"apps/{jsonable_encoder(app_id)}", + method="PATCH", + json={ + "app_name": app_name, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + UpdateAppOutput, + parse_obj_as( + type_=UpdateAppOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def add_variant_from_image( + self, + app_id: str, + *, + variant_name: str, + docker_id: str, + tags: str, + base_name: typing.Optional[str] = OMIT, + config_name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Add a new variant to an app based on a Docker image. + + Args: + app_id (str): The ID of the app to add the variant to. + payload (AddVariantFromImagePayload): The payload containing information about the variant to add. + stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()). 
+ + Raises: + HTTPException: If the feature flag is set to "demo" or if the image does not have a tag starting with the registry name (agenta-server) or if the image is not found or if the user does not have access to the app. + + Returns: + dict: The newly added variant. + + Parameters + ---------- + app_id : str + + variant_name : str + + docker_id : str + + tags : str + + base_name : typing.Optional[str] + + config_name : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.apps.add_variant_from_image( + app_id="app_id", + variant_name="variant_name", + docker_id="docker_id", + tags="tags", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"apps/{jsonable_encoder(app_id)}/variant/from-image", + method="POST", + json={ + "variant_name": variant_name, + "docker_id": docker_id, + "tags": tags, + "base_name": base_name, + "config_name": config_name, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def 
create_app_and_variant_from_template( + self, + *, + app_name: str, + template_id: str, + env_vars: typing.Dict[str, str], + project_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AppVariantResponse: + """ + Create an app and variant from a template. + + Args: + payload (CreateAppVariant): The payload containing the app and variant information. + stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()). + + Raises: + HTTPException: If the user has reached the app limit or if an app with the same name already exists. + + Returns: + AppVariantResponse: The output of the created app variant. + + Parameters + ---------- + app_name : str + + template_id : str + + env_vars : typing.Dict[str, str] + + project_id : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AppVariantResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.apps.create_app_and_variant_from_template( + app_name="app_name", + template_id="template_id", + env_vars={"key": "value"}, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "apps/app_and_variant_from_template", + method="POST", + json={ + "app_name": app_name, + "template_id": template_id, + "project_id": project_id, + "env_vars": env_vars, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AppVariantResponse, + parse_obj_as( + type_=AppVariantResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + 
type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def list_environments( + self, app_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[EnvironmentOutput]: + """ + Retrieve a list of environments for a given app ID. + + Args: + app_id (str): The ID of the app to retrieve environments for. + stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()). + + Returns: + List[EnvironmentOutput]: A list of environment objects. + + Parameters + ---------- + app_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[EnvironmentOutput] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.apps.list_environments( + app_id="app_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"apps/{jsonable_encoder(app_id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[EnvironmentOutput], + parse_obj_as( + type_=typing.List[EnvironmentOutput], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) 
+ raise ApiError(status_code=_response.status_code, body=_response_json) + + async def environment_revisions( + self, + app_id: str, + environment_name: typing.Optional[typing.Any], + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> EnvironmentOutputExtended: + """ + Parameters + ---------- + app_id : str + + environment_name : typing.Optional[typing.Any] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EnvironmentOutputExtended + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.apps.environment_revisions( + app_id="string", + environment_name={"key": "value"}, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"apps/{jsonable_encoder(app_id)}/revisions/{jsonable_encoder(environment_name)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EnvironmentOutputExtended, + parse_obj_as( + type_=EnvironmentOutputExtended, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/bases/__init__.py b/agenta-cli/debugging/simple-app/agenta/client/backend/bases/__init__.py new file mode 100644 index 0000000000..67a41b2742 --- /dev/null +++ 
b/agenta-cli/debugging/simple-app/agenta/client/backend/bases/__init__.py @@ -0,0 +1 @@ +# This file was auto-generated by Fern from our API Definition. diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/bases/client.py b/agenta-cli/debugging/simple-app/agenta/client/backend/bases/client.py new file mode 100644 index 0000000000..3e16b47b10 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/bases/client.py @@ -0,0 +1,190 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.client_wrapper import SyncClientWrapper +import typing +from ..core.request_options import RequestOptions +from ..types.base_output import BaseOutput +from ..core.pydantic_utilities import parse_obj_as +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..core.client_wrapper import AsyncClientWrapper + + +class BasesClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def list_bases( + self, + *, + app_id: str, + base_name: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[BaseOutput]: + """ + Retrieve a list of bases filtered by app_id and base_name. + + Args: + request (Request): The incoming request. + app_id (str): The ID of the app to filter by. + base_name (Optional[str], optional): The name of the base to filter by. Defaults to None. + + Returns: + List[BaseOutput]: A list of BaseOutput objects representing the filtered bases. + + Raises: + HTTPException: If there was an error retrieving the bases. + + Parameters + ---------- + app_id : str + + base_name : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[BaseOutput] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.bases.list_bases( + app_id="app_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "bases", + method="GET", + params={ + "app_id": app_id, + "base_name": base_name, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[BaseOutput], + parse_obj_as( + type_=typing.List[BaseOutput], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncBasesClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def list_bases( + self, + *, + app_id: str, + base_name: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[BaseOutput]: + """ + Retrieve a list of bases filtered by app_id and base_name. + + Args: + request (Request): The incoming request. + app_id (str): The ID of the app to filter by. + base_name (Optional[str], optional): The name of the base to filter by. Defaults to None. + + Returns: + List[BaseOutput]: A list of BaseOutput objects representing the filtered bases. + + Raises: + HTTPException: If there was an error retrieving the bases. + + Parameters + ---------- + app_id : str + + base_name : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[BaseOutput] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.bases.list_bases( + app_id="app_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "bases", + method="GET", + params={ + "app_id": app_id, + "base_name": base_name, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[BaseOutput], + parse_obj_as( + type_=typing.List[BaseOutput], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/client.py b/agenta-cli/debugging/simple-app/agenta/client/backend/client.py new file mode 100644 index 0000000000..9fc9784d8a --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/client.py @@ -0,0 +1,3275 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +import httpx +from .core.client_wrapper import SyncClientWrapper +from .observability.client import ObservabilityClient +from .apps.client import AppsClient +from .variants.client import VariantsClient +from .evaluations.client import EvaluationsClient +from .evaluators.client import EvaluatorsClient +from .testsets.client import TestsetsClient +from .containers.client import ContainersClient +from .environments.client import EnvironmentsClient +from .bases.client import BasesClient +from .configs.client import ConfigsClient +from .observability_v_1.client import ObservabilityV1Client +from .core.request_options import RequestOptions +from .types.list_api_keys_response import ListApiKeysResponse +from .core.pydantic_utilities import parse_obj_as +from json.decoder import JSONDecodeError +from .core.api_error import ApiError +from .errors.unprocessable_entity_error import UnprocessableEntityError +from .types.http_validation_error import HttpValidationError +from .core.jsonable_encoder import jsonable_encoder +from .types.organization import Organization +from .types.organization_output import OrganizationOutput +from .types.invite_request import InviteRequest +from .core.serialization import convert_and_respect_annotation_metadata +from .types.workspace_response import WorkspaceResponse +import datetime as dt +from .types.workspace_role_response import WorkspaceRoleResponse +from .types.permission import Permission +from .core.client_wrapper import AsyncClientWrapper +from .observability.client import AsyncObservabilityClient +from .apps.client import AsyncAppsClient +from .variants.client import AsyncVariantsClient +from .evaluations.client import AsyncEvaluationsClient +from .evaluators.client import AsyncEvaluatorsClient +from .testsets.client import AsyncTestsetsClient +from .containers.client import AsyncContainersClient +from .environments.client import AsyncEnvironmentsClient +from .bases.client import AsyncBasesClient +from .configs.client 
import AsyncConfigsClient +from .observability_v_1.client import AsyncObservabilityV1Client + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class AgentaApi: + """ + Use this class to access the different functions within the SDK. You can instantiate any number of clients with different configuration that will propagate to these functions. + + Parameters + ---------- + base_url : str + The base url to use for requests from the client. + + api_key : str + timeout : typing.Optional[float] + The timeout to be used, in seconds, for requests. By default the timeout is 60 seconds, unless a custom httpx client is used, in which case this default is not enforced. + + follow_redirects : typing.Optional[bool] + Whether the default httpx client follows redirects or not, this is irrelevant if a custom httpx client is passed in. + + httpx_client : typing.Optional[httpx.Client] + The httpx client to use for making requests, a preconfigured client is used by default, however this is useful should you want to pass in any custom httpx configuration. 
+ + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + """ + + def __init__( + self, + *, + base_url: str, + api_key: str, + timeout: typing.Optional[float] = None, + follow_redirects: typing.Optional[bool] = True, + httpx_client: typing.Optional[httpx.Client] = None, + ): + _defaulted_timeout = ( + timeout if timeout is not None else 60 if httpx_client is None else None + ) + self._client_wrapper = SyncClientWrapper( + base_url=base_url, + api_key=api_key, + httpx_client=httpx_client + if httpx_client is not None + else httpx.Client( + timeout=_defaulted_timeout, follow_redirects=follow_redirects + ) + if follow_redirects is not None + else httpx.Client(timeout=_defaulted_timeout), + timeout=_defaulted_timeout, + ) + self.observability = ObservabilityClient(client_wrapper=self._client_wrapper) + self.apps = AppsClient(client_wrapper=self._client_wrapper) + self.variants = VariantsClient(client_wrapper=self._client_wrapper) + self.evaluations = EvaluationsClient(client_wrapper=self._client_wrapper) + self.evaluators = EvaluatorsClient(client_wrapper=self._client_wrapper) + self.testsets = TestsetsClient(client_wrapper=self._client_wrapper) + self.containers = ContainersClient(client_wrapper=self._client_wrapper) + self.environments = EnvironmentsClient(client_wrapper=self._client_wrapper) + self.bases = BasesClient(client_wrapper=self._client_wrapper) + self.configs = ConfigsClient(client_wrapper=self._client_wrapper) + self.observability_v_1 = ObservabilityV1Client( + client_wrapper=self._client_wrapper + ) + + def list_api_keys( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[ListApiKeysResponse]: + """ + List all API keys associated with the authenticated user. + + Args: + request (Request): The incoming request object. + + Returns: + List[ListAPIKeysResponse]: A list of API Keys associated with the user. 
+ + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[ListApiKeysResponse] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.list_api_keys() + """ + _response = self._client_wrapper.httpx_client.request( + "keys", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[ListApiKeysResponse], + parse_obj_as( + type_=typing.List[ListApiKeysResponse], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create_api_key( + self, + *, + workspace_id: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> str: + """ + Creates an API key for a user. + + Args: + request (Request): The request object containing the user ID in the request state. + + Returns: + str: The created API key. + + Parameters + ---------- + workspace_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + str + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.create_api_key( + workspace_id="workspace_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "keys", + method="POST", + params={ + "workspace_id": workspace_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + str, + parse_obj_as( + type_=str, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_api_key( + self, + key_prefix: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Dict[str, typing.Optional[typing.Any]]: + """ + Delete an API key with the given key prefix for the authenticated user. + + Args: + key_prefix (str): The prefix of the API key to be deleted. + request (Request): The incoming request object. + + Returns: + dict: A dictionary containing a success message upon successful deletion. + + Raises: + HTTPException: If the API key is not found or does not belong to the user. + + Parameters + ---------- + key_prefix : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Dict[str, typing.Optional[typing.Any]] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.delete_api_key( + key_prefix="key_prefix", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"keys/{jsonable_encoder(key_prefix)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Dict[str, typing.Optional[typing.Any]], + parse_obj_as( + type_=typing.Dict[str, typing.Optional[typing.Any]], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def validate_api_key( + self, + key_prefix: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> bool: + """ + This Function is called by the CLI and is used to validate an API key provided by a user in agenta init setup. + Returns: + bool: True. If the request reaches this point, the API key is valid. + + Parameters + ---------- + key_prefix : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + bool + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.validate_api_key( + key_prefix="key_prefix", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"keys/{jsonable_encoder(key_prefix)}/validate", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + bool, + parse_obj_as( + type_=bool, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def list_organizations( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[Organization]: + """ + Returns a list of organizations associated with the user's session. + + Args: + stoken_session (SessionContainer): The user's session token. + + Returns: + list[Organization]: A list of organizations associated with the user's session. + + Raises: + HTTPException: If there is an error retrieving the organizations from the database. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[Organization] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.list_organizations() + """ + _response = self._client_wrapper.httpx_client.request( + "organizations", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[Organization], + parse_obj_as( + type_=typing.List[Organization], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create_organization( + self, + *, + name: str, + owner: str, + description: typing.Optional[str] = OMIT, + type: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + name : str + + owner : str + + description : typing.Optional[str] + + type : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.create_organization( + name="name", + owner="owner", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "organizations", + method="POST", + json={ + "name": name, + "owner": owner, + "description": description, + "type": type, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_own_org( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> OrganizationOutput: + """ + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + OrganizationOutput + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.get_own_org() + """ + _response = self._client_wrapper.httpx_client.request( + "organizations/own", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + OrganizationOutput, + parse_obj_as( + type_=OrganizationOutput, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def fetch_organization_details( + self, org_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: + """ + Get an organization's details. + + Raises: + HTTPException: _description_ + Permission Denied + + Returns: + OrganizationDB Instance + + Parameters + ---------- + org_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.fetch_organization_details( + org_id="org_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_organization( + self, + org_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + updated_at: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + org_id : str + + name : typing.Optional[str] + + description : typing.Optional[str] + + updated_at : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.update_organization( + org_id="org_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}", + method="PUT", + json={ + "name": name, + "description": description, + "updated_at": updated_at, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def invite_user_to_workspace( + self, + org_id: str, + workspace_id: str, + *, + request: typing.Sequence[InviteRequest], + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + org_id : str + + workspace_id : str + + request : typing.Sequence[InviteRequest] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi, InviteRequest + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.invite_user_to_workspace( + org_id="org_id", + workspace_id="workspace_id", + request=[ + InviteRequest( + email="email", + roles=["roles"], + ) + ], + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}/workspaces/{jsonable_encoder(workspace_id)}/invite", + method="POST", + json=convert_and_respect_annotation_metadata( + object_=request, + annotation=typing.Sequence[InviteRequest], + direction="write", + ), + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def resend_invitation( + self, + org_id: str, + workspace_id: str, + *, + email: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Resend an invitation to a user to an Organization. 
+ + Raises: + HTTPException: _description_; status_code: 500 + HTTPException: Invitation not found or has expired; status_code: 400 + HTTPException: You already belong to this organization; status_code: 400 + + Returns: + JSONResponse: Resent invitation to user; status_code: 200 + + Parameters + ---------- + org_id : str + + workspace_id : str + + email : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.resend_invitation( + org_id="org_id", + workspace_id="workspace_id", + email="email", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}/workspaces/{jsonable_encoder(workspace_id)}/invite/resend", + method="POST", + json={ + "email": email, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def accept_invitation( + self, + org_id: str, + workspace_id: str, + *, + project_id: str, + token: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Accept an invitation to a workspace. 
+ + Raises: + HTTPException: _description_; status_code: 500 + HTTPException: Invitation not found or has expired; status_code: 400 + HTTPException: You already belong to this organization; status_code: 400 + + Returns: + JSONResponse: Accepted invitation to workspace; status_code: 200 + + Parameters + ---------- + org_id : str + + workspace_id : str + + project_id : str + + token : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.accept_invitation( + org_id="org_id", + workspace_id="workspace_id", + project_id="project_id", + token="token", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}/workspaces/{jsonable_encoder(workspace_id)}/invite/accept", + method="POST", + params={ + "project_id": project_id, + }, + json={ + "token": token, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create_workspace( + self, + org_id: str, + *, + name: str, + description: typing.Optional[str] = OMIT, + type: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> 
WorkspaceResponse: + """ + Parameters + ---------- + org_id : str + + name : str + + description : typing.Optional[str] + + type : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + WorkspaceResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.create_workspace( + org_id="org_id", + name="name", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}/workspaces", + method="POST", + json={ + "name": name, + "description": description, + "type": type, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + WorkspaceResponse, + parse_obj_as( + type_=WorkspaceResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_workspace( + self, + org_id: str, + workspace_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + updated_at: typing.Optional[dt.datetime] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> WorkspaceResponse: + """ + Parameters + ---------- + org_id : str + + workspace_id : str + + name : typing.Optional[str] + + description : typing.Optional[str] + + updated_at : typing.Optional[dt.datetime] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + WorkspaceResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.update_workspace( + org_id="org_id", + workspace_id="workspace_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}/workspaces/{jsonable_encoder(workspace_id)}", + method="PUT", + json={ + "name": name, + "description": description, + "updated_at": updated_at, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + WorkspaceResponse, + parse_obj_as( + type_=WorkspaceResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_all_workspace_roles( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[WorkspaceRoleResponse]: + """ + Get all workspace roles. + + Returns a list of all available workspace roles. + + Returns: + List[WorkspaceRoleResponse]: A list of WorkspaceRole objects representing the available workspace roles. + + Raises: + HTTPException: If an error occurs while retrieving the workspace roles. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[WorkspaceRoleResponse] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.get_all_workspace_roles() + """ + _response = self._client_wrapper.httpx_client.request( + "workspaces/roles", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[WorkspaceRoleResponse], + parse_obj_as( + type_=typing.List[WorkspaceRoleResponse], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_all_workspace_permissions( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[Permission]: + """ + Get all workspace permissions. + + Returns a list of all available workspace permissions. + + Returns: + List[Permission]: A list of Permission objects representing the available workspace permissions. + + Raises: + HTTPException: If there is an error retrieving the workspace permissions. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[Permission] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.get_all_workspace_permissions() + """ + _response = self._client_wrapper.httpx_client.request( + "workspaces/permissions", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[Permission], + parse_obj_as( + type_=typing.List[Permission], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def assign_role_to_user( + self, + workspace_id: str, + *, + email: str, + organization_id: str, + role: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> bool: + """ + Assigns a role to a user in a workspace. + + Args: + payload (UserRole): The payload containing the organization id, user email, and role to assign. + workspace_id (str): The ID of the workspace. + request (Request): The FastAPI request object. + + Returns: + bool: True if the role was successfully assigned, False otherwise. + + Raises: + HTTPException: If the user does not have permission to perform this action. + HTTPException: If there is an error assigning the role to the user. + + Parameters + ---------- + workspace_id : str + + email : str + + organization_id : str + + role : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + bool + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.assign_role_to_user( + workspace_id="workspace_id", + email="email", + organization_id="organization_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"workspaces/{jsonable_encoder(workspace_id)}/roles", + method="POST", + json={ + "email": email, + "organization_id": organization_id, + "role": role, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + bool, + parse_obj_as( + type_=bool, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def unassign_role_from_user( + self, + workspace_id: str, + *, + email: str, + org_id: str, + role: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Delete a role assignment from a user in a workspace. + + Args: + workspace_id (str): The ID of the workspace. + email (str): The email of the user to remove the role from. + org_id (str): The ID of the organization. + role (str): The role to remove from the user. + request (Request): The FastAPI request object. + + Returns: + bool: True if the role assignment was successfully deleted. + + Raises: + HTTPException: If there is an error in the request or the user does not have permission to perform the action. + HTTPException: If there is an error in updating the user's roles. 
+ + Parameters + ---------- + workspace_id : str + + email : str + + org_id : str + + role : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.unassign_role_from_user( + workspace_id="workspace_id", + email="email", + org_id="org_id", + role="role", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"workspaces/{jsonable_encoder(workspace_id)}/roles", + method="DELETE", + params={ + "email": email, + "org_id": org_id, + "role": role, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def remove_user_from_workspace( + self, + workspace_id: str, + *, + org_id: str, + email: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> WorkspaceResponse: + """ + Remove a user from a workspace. + + Parameters: + + - payload (UserRole): The payload containing the user email and organization ID. + - workspace_id (str): The ID of the workspace. + - request (Request): The FastAPI request object. + + Returns: + + - WorkspaceResponse: The updated workspace. + + Raises: + + - HTTPException: If the user does not have permission to perform this action. 
+ - HTTPException: If there is an error during the removal process. + + Parameters + ---------- + workspace_id : str + + org_id : str + + email : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + WorkspaceResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.remove_user_from_workspace( + workspace_id="workspace_id", + org_id="org_id", + email="email", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"workspaces/{jsonable_encoder(workspace_id)}/users", + method="DELETE", + params={ + "org_id": org_id, + "email": email, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + WorkspaceResponse, + parse_obj_as( + type_=WorkspaceResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def health_check( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.health_check() + """ + _response = self._client_wrapper.httpx_client.request( + "health", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def fetch_user_profile( + self, + *, + user_id: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + user_id : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.fetch_user_profile() + """ + _response = self._client_wrapper.httpx_client.request( + "profile", + method="GET", + params={ + "user_id": user_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncAgentaApi: + """ + Use this class to access the different functions within the SDK. You can instantiate any number of clients with different configuration that will propagate to these functions. + + Parameters + ---------- + base_url : str + The base url to use for requests from the client. + + api_key : str + timeout : typing.Optional[float] + The timeout to be used, in seconds, for requests. By default the timeout is 60 seconds, unless a custom httpx client is used, in which case this default is not enforced. + + follow_redirects : typing.Optional[bool] + Whether the default httpx client follows redirects or not, this is irrelevant if a custom httpx client is passed in. + + httpx_client : typing.Optional[httpx.AsyncClient] + The httpx client to use for making requests, a preconfigured client is used by default, however this is useful should you want to pass in any custom httpx configuration. 
+ + Examples + -------- + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + """ + + def __init__( + self, + *, + base_url: str, + api_key: str, + timeout: typing.Optional[float] = None, + follow_redirects: typing.Optional[bool] = True, + httpx_client: typing.Optional[httpx.AsyncClient] = None, + ): + _defaulted_timeout = ( + timeout if timeout is not None else 60 if httpx_client is None else None + ) + self._client_wrapper = AsyncClientWrapper( + base_url=base_url, + api_key=api_key, + httpx_client=httpx_client + if httpx_client is not None + else httpx.AsyncClient( + timeout=_defaulted_timeout, follow_redirects=follow_redirects + ) + if follow_redirects is not None + else httpx.AsyncClient(timeout=_defaulted_timeout), + timeout=_defaulted_timeout, + ) + self.observability = AsyncObservabilityClient( + client_wrapper=self._client_wrapper + ) + self.apps = AsyncAppsClient(client_wrapper=self._client_wrapper) + self.variants = AsyncVariantsClient(client_wrapper=self._client_wrapper) + self.evaluations = AsyncEvaluationsClient(client_wrapper=self._client_wrapper) + self.evaluators = AsyncEvaluatorsClient(client_wrapper=self._client_wrapper) + self.testsets = AsyncTestsetsClient(client_wrapper=self._client_wrapper) + self.containers = AsyncContainersClient(client_wrapper=self._client_wrapper) + self.environments = AsyncEnvironmentsClient(client_wrapper=self._client_wrapper) + self.bases = AsyncBasesClient(client_wrapper=self._client_wrapper) + self.configs = AsyncConfigsClient(client_wrapper=self._client_wrapper) + self.observability_v_1 = AsyncObservabilityV1Client( + client_wrapper=self._client_wrapper + ) + + async def list_api_keys( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[ListApiKeysResponse]: + """ + List all API keys associated with the authenticated user. + + Args: + request (Request): The incoming request object. 
+ + Returns: + List[ListAPIKeysResponse]: A list of API Keys associated with the user. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[ListApiKeysResponse] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.list_api_keys() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "keys", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[ListApiKeysResponse], + parse_obj_as( + type_=typing.List[ListApiKeysResponse], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def create_api_key( + self, + *, + workspace_id: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> str: + """ + Creates an API key for a user. + + Args: + request (Request): The request object containing the user ID in the request state. + + Returns: + str: The created API key. + + Parameters + ---------- + workspace_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + str + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.create_api_key( + workspace_id="workspace_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "keys", + method="POST", + params={ + "workspace_id": workspace_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + str, + parse_obj_as( + type_=str, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_api_key( + self, + key_prefix: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Dict[str, typing.Optional[typing.Any]]: + """ + Delete an API key with the given key prefix for the authenticated user. + + Args: + key_prefix (str): The prefix of the API key to be deleted. + request (Request): The incoming request object. + + Returns: + dict: A dictionary containing a success message upon successful deletion. + + Raises: + HTTPException: If the API key is not found or does not belong to the user. + + Parameters + ---------- + key_prefix : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Dict[str, typing.Optional[typing.Any]] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.delete_api_key( + key_prefix="key_prefix", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"keys/{jsonable_encoder(key_prefix)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Dict[str, typing.Optional[typing.Any]], + parse_obj_as( + type_=typing.Dict[str, typing.Optional[typing.Any]], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def validate_api_key( + self, + key_prefix: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> bool: + """ + This Function is called by the CLI and is used to validate an API key provided by a user in agenta init setup. + Returns: + bool: True. If the request reaches this point, the API key is valid. + + Parameters + ---------- + key_prefix : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + bool + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.validate_api_key( + key_prefix="key_prefix", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"keys/{jsonable_encoder(key_prefix)}/validate", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + bool, + parse_obj_as( + type_=bool, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def list_organizations( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[Organization]: + """ + Returns a list of organizations associated with the user's session. + + Args: + stoken_session (SessionContainer): The user's session token. + + Returns: + list[Organization]: A list of organizations associated with the user's session. + + Raises: + HTTPException: If there is an error retrieving the organizations from the database. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[Organization] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.list_organizations() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "organizations", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[Organization], + parse_obj_as( + type_=typing.List[Organization], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def create_organization( + self, + *, + name: str, + owner: str, + description: typing.Optional[str] = OMIT, + type: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + name : str + + owner : str + + description : typing.Optional[str] + + type : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.create_organization( + name="name", + owner="owner", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "organizations", + method="POST", + json={ + "name": name, + "owner": owner, + "description": description, + "type": type, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_own_org( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> OrganizationOutput: + """ + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + OrganizationOutput + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.get_own_org() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "organizations/own", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + OrganizationOutput, + parse_obj_as( + type_=OrganizationOutput, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def fetch_organization_details( + self, org_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: + """ + Get an organization's details. + + Raises: + HTTPException: _description_ + Permission Denied + + Returns: + OrganizationDB Instance + + Parameters + ---------- + org_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.fetch_organization_details( + org_id="org_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_organization( + self, + org_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + updated_at: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + org_id : str + + name : typing.Optional[str] + + description : typing.Optional[str] + + updated_at : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.update_organization( + org_id="org_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}", + method="PUT", + json={ + "name": name, + "description": description, + "updated_at": updated_at, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def invite_user_to_workspace( + self, + org_id: str, + workspace_id: str, + *, + request: typing.Sequence[InviteRequest], + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + org_id : str + + workspace_id : str + + request : typing.Sequence[InviteRequest] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi, InviteRequest + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.invite_user_to_workspace( + org_id="org_id", + workspace_id="workspace_id", + request=[ + InviteRequest( + email="email", + roles=["roles"], + ) + ], + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}/workspaces/{jsonable_encoder(workspace_id)}/invite", + method="POST", + json=convert_and_respect_annotation_metadata( + object_=request, + annotation=typing.Sequence[InviteRequest], + direction="write", + ), + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def resend_invitation( + self, + org_id: str, + workspace_id: str, + *, + email: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Resend an invitation to a user to an Organization. 
+ + Raises: + HTTPException: _description_; status_code: 500 + HTTPException: Invitation not found or has expired; status_code: 400 + HTTPException: You already belong to this organization; status_code: 400 + + Returns: + JSONResponse: Resent invitation to user; status_code: 200 + + Parameters + ---------- + org_id : str + + workspace_id : str + + email : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.resend_invitation( + org_id="org_id", + workspace_id="workspace_id", + email="email", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}/workspaces/{jsonable_encoder(workspace_id)}/invite/resend", + method="POST", + json={ + "email": email, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def accept_invitation( + self, + org_id: str, + workspace_id: str, + *, + project_id: str, + token: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Accept an 
invitation to a workspace. + + Raises: + HTTPException: _description_; status_code: 500 + HTTPException: Invitation not found or has expired; status_code: 400 + HTTPException: You already belong to this organization; status_code: 400 + + Returns: + JSONResponse: Accepted invitation to workspace; status_code: 200 + + Parameters + ---------- + org_id : str + + workspace_id : str + + project_id : str + + token : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.accept_invitation( + org_id="org_id", + workspace_id="workspace_id", + project_id="project_id", + token="token", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}/workspaces/{jsonable_encoder(workspace_id)}/invite/accept", + method="POST", + params={ + "project_id": project_id, + }, + json={ + "token": token, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def create_workspace( + self, + org_id: str, + *, + name: str, + description: 
typing.Optional[str] = OMIT, + type: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> WorkspaceResponse: + """ + Parameters + ---------- + org_id : str + + name : str + + description : typing.Optional[str] + + type : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + WorkspaceResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.create_workspace( + org_id="org_id", + name="name", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}/workspaces", + method="POST", + json={ + "name": name, + "description": description, + "type": type, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + WorkspaceResponse, + parse_obj_as( + type_=WorkspaceResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_workspace( + self, + org_id: str, + workspace_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + updated_at: typing.Optional[dt.datetime] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> WorkspaceResponse: + """ + Parameters + ---------- + org_id : str + + workspace_id : str + + 
name : typing.Optional[str] + + description : typing.Optional[str] + + updated_at : typing.Optional[dt.datetime] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + WorkspaceResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.update_workspace( + org_id="org_id", + workspace_id="workspace_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}/workspaces/{jsonable_encoder(workspace_id)}", + method="PUT", + json={ + "name": name, + "description": description, + "updated_at": updated_at, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + WorkspaceResponse, + parse_obj_as( + type_=WorkspaceResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_all_workspace_roles( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[WorkspaceRoleResponse]: + """ + Get all workspace roles. + + Returns a list of all available workspace roles. + + Returns: + List[WorkspaceRoleResponse]: A list of WorkspaceRole objects representing the available workspace roles. + + Raises: + HTTPException: If an error occurs while retrieving the workspace roles. 
+ + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[WorkspaceRoleResponse] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.get_all_workspace_roles() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "workspaces/roles", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[WorkspaceRoleResponse], + parse_obj_as( + type_=typing.List[WorkspaceRoleResponse], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_all_workspace_permissions( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[Permission]: + """ + Get all workspace permissions. + + Returns a list of all available workspace permissions. + + Returns: + List[Permission]: A list of Permission objects representing the available workspace permissions. + + Raises: + HTTPException: If there is an error retrieving the workspace permissions. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[Permission] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.get_all_workspace_permissions() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "workspaces/permissions", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[Permission], + parse_obj_as( + type_=typing.List[Permission], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def assign_role_to_user( + self, + workspace_id: str, + *, + email: str, + organization_id: str, + role: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> bool: + """ + Assigns a role to a user in a workspace. + + Args: + payload (UserRole): The payload containing the organization id, user email, and role to assign. + workspace_id (str): The ID of the workspace. + request (Request): The FastAPI request object. + + Returns: + bool: True if the role was successfully assigned, False otherwise. + + Raises: + HTTPException: If the user does not have permission to perform this action. + HTTPException: If there is an error assigning the role to the user. + + Parameters + ---------- + workspace_id : str + + email : str + + organization_id : str + + role : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + bool + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.assign_role_to_user( + workspace_id="workspace_id", + email="email", + organization_id="organization_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"workspaces/{jsonable_encoder(workspace_id)}/roles", + method="POST", + json={ + "email": email, + "organization_id": organization_id, + "role": role, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + bool, + parse_obj_as( + type_=bool, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def unassign_role_from_user( + self, + workspace_id: str, + *, + email: str, + org_id: str, + role: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Delete a role assignment from a user in a workspace. + + Args: + workspace_id (str): The ID of the workspace. + email (str): The email of the user to remove the role from. + org_id (str): The ID of the organization. + role (str): The role to remove from the user. + request (Request): The FastAPI request object. + + Returns: + bool: True if the role assignment was successfully deleted. 
+ + Raises: + HTTPException: If there is an error in the request or the user does not have permission to perform the action. + HTTPException: If there is an error in updating the user's roles. + + Parameters + ---------- + workspace_id : str + + email : str + + org_id : str + + role : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.unassign_role_from_user( + workspace_id="workspace_id", + email="email", + org_id="org_id", + role="role", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"workspaces/{jsonable_encoder(workspace_id)}/roles", + method="DELETE", + params={ + "email": email, + "org_id": org_id, + "role": role, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def remove_user_from_workspace( + self, + workspace_id: str, + *, + org_id: str, + email: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> WorkspaceResponse: + """ + Remove a user from a workspace. 
+ + Parameters: + + - payload (UserRole): The payload containing the user email and organization ID. + - workspace_id (str): The ID of the workspace. + - request (Request): The FastAPI request object. + + Returns: + + - WorkspaceResponse: The updated workspace. + + Raises: + + - HTTPException: If the user does not have permission to perform this action. + - HTTPException: If there is an error during the removal process. + + Parameters + ---------- + workspace_id : str + + org_id : str + + email : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + WorkspaceResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.remove_user_from_workspace( + workspace_id="workspace_id", + org_id="org_id", + email="email", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"workspaces/{jsonable_encoder(workspace_id)}/users", + method="DELETE", + params={ + "org_id": org_id, + "email": email, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + WorkspaceResponse, + parse_obj_as( + type_=WorkspaceResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def health_check( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: + """ + 
Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.health_check() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "health", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def fetch_user_profile( + self, + *, + user_id: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + user_id : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.fetch_user_profile() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "profile", + method="GET", + params={ + "user_id": user_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/configs/__init__.py b/agenta-cli/debugging/simple-app/agenta/client/backend/configs/__init__.py new file mode 100644 index 0000000000..67a41b2742 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/configs/__init__.py @@ -0,0 +1 @@ +# This file was auto-generated by Fern from our API Definition. diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/configs/client.py b/agenta-cli/debugging/simple-app/agenta/client/backend/configs/client.py new file mode 100644 index 0000000000..e034ff37b0 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/configs/client.py @@ -0,0 +1,598 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +from ..core.client_wrapper import SyncClientWrapper +from ..core.request_options import RequestOptions +from ..types.get_config_response import GetConfigResponse +from ..core.pydantic_utilities import parse_obj_as +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..core.jsonable_encoder import jsonable_encoder +from ..core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class ConfigsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def get_config( + self, + *, + base_id: str, + config_name: typing.Optional[str] = None, + environment_name: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> GetConfigResponse: + """ + Parameters + ---------- + base_id : str + + config_name : typing.Optional[str] + + environment_name : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + GetConfigResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.configs.get_config( + base_id="base_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "configs", + method="GET", + params={ + "base_id": base_id, + "config_name": config_name, + "environment_name": environment_name, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetConfigResponse, + parse_obj_as( + type_=GetConfigResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def save_config( + self, + *, + base_id: str, + config_name: str, + parameters: typing.Dict[str, typing.Optional[typing.Any]], + overwrite: bool, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + base_id : str + + config_name : str + + parameters : typing.Dict[str, typing.Optional[typing.Any]] + + overwrite : bool + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.configs.save_config( + base_id="base_id", + config_name="config_name", + parameters={"key": "value"}, + overwrite=True, + ) + """ + _response = self._client_wrapper.httpx_client.request( + "configs", + method="POST", + json={ + "base_id": base_id, + "config_name": config_name, + "parameters": parameters, + "overwrite": overwrite, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_config_deployment_revision( + self, + deployment_revision_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + deployment_revision_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.configs.get_config_deployment_revision( + deployment_revision_id="deployment_revision_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"configs/deployment/{jsonable_encoder(deployment_revision_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def revert_deployment_revision( + self, + deployment_revision_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + deployment_revision_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.configs.revert_deployment_revision( + deployment_revision_id="deployment_revision_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"configs/deployment/{jsonable_encoder(deployment_revision_id)}/revert", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncConfigsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def get_config( + self, + *, + base_id: str, + config_name: typing.Optional[str] = None, + environment_name: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> GetConfigResponse: + """ + Parameters + ---------- + base_id : str + + config_name : typing.Optional[str] + + environment_name : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + GetConfigResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.configs.get_config( + base_id="base_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "configs", + method="GET", + params={ + "base_id": base_id, + "config_name": config_name, + "environment_name": environment_name, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetConfigResponse, + parse_obj_as( + type_=GetConfigResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def save_config( + self, + *, + base_id: str, + config_name: str, + parameters: typing.Dict[str, typing.Optional[typing.Any]], + overwrite: bool, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + base_id : str + + config_name : str + + parameters : typing.Dict[str, typing.Optional[typing.Any]] + + overwrite : bool + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.configs.save_config( + base_id="base_id", + config_name="config_name", + parameters={"key": "value"}, + overwrite=True, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "configs", + method="POST", + json={ + "base_id": base_id, + "config_name": config_name, + "parameters": parameters, + "overwrite": overwrite, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_config_deployment_revision( + self, + deployment_revision_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + deployment_revision_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.configs.get_config_deployment_revision( + deployment_revision_id="deployment_revision_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"configs/deployment/{jsonable_encoder(deployment_revision_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def revert_deployment_revision( + self, + deployment_revision_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + deployment_revision_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.configs.revert_deployment_revision( + deployment_revision_id="deployment_revision_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"configs/deployment/{jsonable_encoder(deployment_revision_id)}/revert", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/containers/__init__.py b/agenta-cli/debugging/simple-app/agenta/client/backend/containers/__init__.py new file mode 100644 index 0000000000..3d1974f7ad --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/containers/__init__.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .types import ContainerTemplatesResponse + +__all__ = ["ContainerTemplatesResponse"] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/containers/client.py b/agenta-cli/debugging/simple-app/agenta/client/backend/containers/client.py new file mode 100644 index 0000000000..c7180c0a4b --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/containers/client.py @@ -0,0 +1,642 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ..core.client_wrapper import SyncClientWrapper +from .. import core +from ..core.request_options import RequestOptions +from ..types.image import Image +from ..core.pydantic_utilities import parse_obj_as +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from .types.container_templates_response import ContainerTemplatesResponse +from ..types.uri import Uri +from ..core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class ContainersClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def build_image( + self, + *, + app_id: str, + base_name: str, + tar_file: core.File, + request_options: typing.Optional[RequestOptions] = None, + ) -> Image: + """ + Builds a Docker image from a tar file containing the application code. + + Args: + app_id (str): The ID of the application to build the image for. + base_name (str): The base name of the image to build. + tar_file (UploadFile): The tar file containing the application code. + stoken_session (SessionContainer): The session container for the user making the request. + + Returns: + Image: The Docker image that was built. 
+ + Parameters + ---------- + app_id : str + + base_name : str + + tar_file : core.File + See core.File for more documentation + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + Image + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.containers.build_image( + app_id="app_id", + base_name="base_name", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "containers/build_image", + method="POST", + params={ + "app_id": app_id, + "base_name": base_name, + }, + data={}, + files={ + "tar_file": tar_file, + }, + request_options=( + {**request_options, "timeout_in_seconds": 600} + if request_options + else {"timeout_in_seconds": 600} + ), + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + Image, + parse_obj_as( + type_=Image, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def restart_container( + self, + *, + variant_id: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Dict[str, typing.Optional[typing.Any]]: + """ + Restart docker container. + + Args: + payload (RestartAppContainer) -- the required data (app_name and variant_name) + + Parameters + ---------- + variant_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Dict[str, typing.Optional[typing.Any]] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.containers.restart_container( + variant_id="variant_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "containers/restart_container", + method="POST", + json={ + "variant_id": variant_id, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Dict[str, typing.Optional[typing.Any]], + parse_obj_as( + type_=typing.Dict[str, typing.Optional[typing.Any]], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def container_templates( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> ContainerTemplatesResponse: + """ + Returns a list of templates available for creating new containers. + + Parameters: + stoken_session (SessionContainer): The session container for the user. + + Returns: + + Union[List[Template], str]: A list of templates or an error message. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + ContainerTemplatesResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.containers.container_templates() + """ + _response = self._client_wrapper.httpx_client.request( + "containers/templates", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ContainerTemplatesResponse, + parse_obj_as( + type_=ContainerTemplatesResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def construct_app_container_url( + self, + *, + base_id: typing.Optional[str] = None, + variant_id: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> Uri: + """ + Constructs the URL for an app container based on the provided base_id or variant_id. + + Args: + base_id (Optional[str]): The ID of the base to use for the app container. + variant_id (Optional[str]): The ID of the variant to use for the app container. + request (Request): The request object. + + Returns: + URI: The URI for the app container. + + Raises: + HTTPException: If the base or variant cannot be found or the user does not have access. + + Parameters + ---------- + base_id : typing.Optional[str] + + variant_id : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + Uri + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.containers.construct_app_container_url() + """ + _response = self._client_wrapper.httpx_client.request( + "containers/container_url", + method="GET", + params={ + "base_id": base_id, + "variant_id": variant_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + Uri, + parse_obj_as( + type_=Uri, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncContainersClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def build_image( + self, + *, + app_id: str, + base_name: str, + tar_file: core.File, + request_options: typing.Optional[RequestOptions] = None, + ) -> Image: + """ + Builds a Docker image from a tar file containing the application code. + + Args: + app_id (str): The ID of the application to build the image for. + base_name (str): The base name of the image to build. + tar_file (UploadFile): The tar file containing the application code. + stoken_session (SessionContainer): The session container for the user making the request. + + Returns: + Image: The Docker image that was built. + + Parameters + ---------- + app_id : str + + base_name : str + + tar_file : core.File + See core.File for more documentation + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + Image + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.containers.build_image( + app_id="app_id", + base_name="base_name", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "containers/build_image", + method="POST", + params={ + "app_id": app_id, + "base_name": base_name, + }, + data={}, + files={ + "tar_file": tar_file, + }, + request_options=( + {**request_options, "timeout_in_seconds": 600} + if request_options + else {"timeout_in_seconds": 600} + ), + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + Image, + parse_obj_as( + type_=Image, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def restart_container( + self, + *, + variant_id: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Dict[str, typing.Optional[typing.Any]]: + """ + Restart docker container. + + Args: + payload (RestartAppContainer) -- the required data (app_name and variant_name) + + Parameters + ---------- + variant_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Dict[str, typing.Optional[typing.Any]] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.containers.restart_container( + variant_id="variant_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "containers/restart_container", + method="POST", + json={ + "variant_id": variant_id, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Dict[str, typing.Optional[typing.Any]], + parse_obj_as( + type_=typing.Dict[str, typing.Optional[typing.Any]], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def container_templates( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> ContainerTemplatesResponse: + """ + Returns a list of templates available for creating new containers. + + Parameters: + stoken_session (SessionContainer): The session container for the user. + + Returns: + + Union[List[Template], str]: A list of templates or an error message. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + ContainerTemplatesResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.containers.container_templates() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "containers/templates", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ContainerTemplatesResponse, + parse_obj_as( + type_=ContainerTemplatesResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def construct_app_container_url( + self, + *, + base_id: typing.Optional[str] = None, + variant_id: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> Uri: + """ + Constructs the URL for an app container based on the provided base_id or variant_id. + + Args: + base_id (Optional[str]): The ID of the base to use for the app container. + variant_id (Optional[str]): The ID of the variant to use for the app container. + request (Request): The request object. + + Returns: + URI: The URI for the app container. + + Raises: + HTTPException: If the base or variant cannot be found or the user does not have access. + + Parameters + ---------- + base_id : typing.Optional[str] + + variant_id : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + Uri + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.containers.construct_app_container_url() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "containers/container_url", + method="GET", + params={ + "base_id": base_id, + "variant_id": variant_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + Uri, + parse_obj_as( + type_=Uri, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/containers/types/__init__.py b/agenta-cli/debugging/simple-app/agenta/client/backend/containers/types/__init__.py new file mode 100644 index 0000000000..b68c5ab25e --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/containers/types/__init__.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .container_templates_response import ContainerTemplatesResponse + +__all__ = ["ContainerTemplatesResponse"] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/containers/types/container_templates_response.py b/agenta-cli/debugging/simple-app/agenta/client/backend/containers/types/container_templates_response.py new file mode 100644 index 0000000000..27177d4d0a --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/containers/types/container_templates_response.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...types.template import Template + +ContainerTemplatesResponse = typing.Union[typing.List[Template], str] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/core/__init__.py b/agenta-cli/debugging/simple-app/agenta/client/backend/core/__init__.py new file mode 100644 index 0000000000..f03aecbfe1 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/core/__init__.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .api_error import ApiError +from .client_wrapper import AsyncClientWrapper, BaseClientWrapper, SyncClientWrapper +from .datetime_utils import serialize_datetime +from .file import File, convert_file_dict_to_httpx_tuples, with_content_type +from .http_client import AsyncHttpClient, HttpClient +from .jsonable_encoder import jsonable_encoder +from .pydantic_utilities import ( + IS_PYDANTIC_V2, + UniversalBaseModel, + UniversalRootModel, + parse_obj_as, + universal_field_validator, + universal_root_validator, + update_forward_refs, +) +from .query_encoder import encode_query +from .remove_none_from_dict import remove_none_from_dict +from .request_options import RequestOptions +from .serialization import FieldMetadata, convert_and_respect_annotation_metadata + +__all__ = [ + "ApiError", + "AsyncClientWrapper", + "AsyncHttpClient", + "BaseClientWrapper", + "FieldMetadata", + "File", + "HttpClient", + "IS_PYDANTIC_V2", + "RequestOptions", + "SyncClientWrapper", + "UniversalBaseModel", + "UniversalRootModel", + "convert_and_respect_annotation_metadata", + "convert_file_dict_to_httpx_tuples", + "encode_query", + "jsonable_encoder", + "parse_obj_as", + "remove_none_from_dict", + "serialize_datetime", + "universal_field_validator", + "universal_root_validator", + "update_forward_refs", + "with_content_type", +] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/core/api_error.py b/agenta-cli/debugging/simple-app/agenta/client/backend/core/api_error.py new file mode 100644 index 0000000000..da734b5806 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/core/api_error.py @@ -0,0 +1,17 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + + +class ApiError(Exception): + status_code: typing.Optional[int] + body: typing.Any + + def __init__( + self, *, status_code: typing.Optional[int] = None, body: typing.Any = None + ): + self.status_code = status_code + self.body = body + + def __str__(self) -> str: + return f"status_code: {self.status_code}, body: {self.body}" diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/core/client_wrapper.py b/agenta-cli/debugging/simple-app/agenta/client/backend/core/client_wrapper.py new file mode 100644 index 0000000000..3c2a647bb4 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/core/client_wrapper.py @@ -0,0 +1,64 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +import httpx +from .http_client import HttpClient +from .http_client import AsyncHttpClient + + +class BaseClientWrapper: + def __init__( + self, *, api_key: str, base_url: str, timeout: typing.Optional[float] = None + ): + self.api_key = api_key + self._base_url = base_url + self._timeout = timeout + + def get_headers(self) -> typing.Dict[str, str]: + headers: typing.Dict[str, str] = { + "X-Fern-Language": "Python", + } + headers["Authorization"] = self.api_key + return headers + + def get_base_url(self) -> str: + return self._base_url + + def get_timeout(self) -> typing.Optional[float]: + return self._timeout + + +class SyncClientWrapper(BaseClientWrapper): + def __init__( + self, + *, + api_key: str, + base_url: str, + timeout: typing.Optional[float] = None, + httpx_client: httpx.Client, + ): + super().__init__(api_key=api_key, base_url=base_url, timeout=timeout) + self.httpx_client = HttpClient( + httpx_client=httpx_client, + base_headers=self.get_headers, + base_timeout=self.get_timeout, + base_url=self.get_base_url, + ) + + +class AsyncClientWrapper(BaseClientWrapper): + def __init__( + self, + *, + api_key: str, + base_url: str, + timeout: typing.Optional[float] = None, + httpx_client: httpx.AsyncClient, + 
): + super().__init__(api_key=api_key, base_url=base_url, timeout=timeout) + self.httpx_client = AsyncHttpClient( + httpx_client=httpx_client, + base_headers=self.get_headers, + base_timeout=self.get_timeout, + base_url=self.get_base_url, + ) diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/core/datetime_utils.py b/agenta-cli/debugging/simple-app/agenta/client/backend/core/datetime_utils.py new file mode 100644 index 0000000000..47344e9d9c --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/core/datetime_utils.py @@ -0,0 +1,30 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt + + +def serialize_datetime(v: dt.datetime) -> str: + """ + Serialize a datetime including timezone info. + + Uses the timezone info provided if present, otherwise uses the current runtime's timezone info. + + UTC datetimes end in "Z" while all other timezones are represented as offset from UTC, e.g. +05:00. + """ + + def _serialize_zoned_datetime(v: dt.datetime) -> str: + if v.tzinfo is not None and v.tzinfo.tzname(None) == dt.timezone.utc.tzname( + None + ): + # UTC is a special case where we use "Z" at the end instead of "+00:00" + return v.isoformat().replace("+00:00", "Z") + else: + # Delegate to the typical +/- offset format + return v.isoformat() + + if v.tzinfo is not None: + return _serialize_zoned_datetime(v) + else: + local_tz = dt.datetime.now().astimezone().tzinfo + localized_dt = v.replace(tzinfo=local_tz) + return _serialize_zoned_datetime(localized_dt) diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/core/file.py b/agenta-cli/debugging/simple-app/agenta/client/backend/core/file.py new file mode 100644 index 0000000000..a9623d336a --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/core/file.py @@ -0,0 +1,65 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from typing import IO, Dict, List, Mapping, Optional, Tuple, Union, cast + +# File typing inspired by the flexibility of types within the httpx library +# https://github.com/encode/httpx/blob/master/httpx/_types.py +FileContent = Union[IO[bytes], bytes, str] +File = Union[ + # file (or bytes) + FileContent, + # (filename, file (or bytes)) + Tuple[Optional[str], FileContent], + # (filename, file (or bytes), content_type) + Tuple[Optional[str], FileContent, Optional[str]], + # (filename, file (or bytes), content_type, headers) + Tuple[ + Optional[str], + FileContent, + Optional[str], + Mapping[str, str], + ], +] + + +def convert_file_dict_to_httpx_tuples( + d: Dict[str, Union[File, List[File]]], +) -> List[Tuple[str, File]]: + """ + The format we use is a list of tuples, where the first element is the + name of the file and the second is the file object. Typically HTTPX wants + a dict, but to be able to send lists of files, you have to use the list + approach (which also works for non-lists) + https://github.com/encode/httpx/pull/1032 + """ + + httpx_tuples = [] + for key, file_like in d.items(): + if isinstance(file_like, list): + for file_like_item in file_like: + httpx_tuples.append((key, file_like_item)) + else: + httpx_tuples.append((key, file_like)) + return httpx_tuples + + +def with_content_type(*, file: File, content_type: str) -> File: + """ """ + if isinstance(file, tuple): + if len(file) == 2: + filename, content = cast(Tuple[Optional[str], FileContent], file) # type: ignore + return (filename, content, content_type) + elif len(file) == 3: + filename, content, _ = cast( + Tuple[Optional[str], FileContent, Optional[str]], file + ) # type: ignore + return (filename, content, content_type) + elif len(file) == 4: + filename, content, _, headers = cast( # type: ignore + Tuple[Optional[str], FileContent, Optional[str], Mapping[str, str]], + file, + ) + return (filename, content, content_type, headers) + else: + raise ValueError(f"Unexpected tuple length: 
{len(file)}") + return (None, file, content_type) diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/core/http_client.py b/agenta-cli/debugging/simple-app/agenta/client/backend/core/http_client.py new file mode 100644 index 0000000000..c9f1d7a59b --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/core/http_client.py @@ -0,0 +1,575 @@ +# This file was auto-generated by Fern from our API Definition. + +import asyncio +import email.utils +import json +import re +import time +import typing +import urllib.parse +from contextlib import asynccontextmanager, contextmanager +from random import random + +import httpx + +from .file import File, convert_file_dict_to_httpx_tuples +from .jsonable_encoder import jsonable_encoder +from .query_encoder import encode_query +from .remove_none_from_dict import remove_none_from_dict +from .request_options import RequestOptions + +INITIAL_RETRY_DELAY_SECONDS = 0.5 +MAX_RETRY_DELAY_SECONDS = 10 +MAX_RETRY_DELAY_SECONDS_FROM_HEADER = 30 + + +def _parse_retry_after(response_headers: httpx.Headers) -> typing.Optional[float]: + """ + This function parses the `Retry-After` header in a HTTP response and returns the number of seconds to wait. + + Inspired by the urllib3 retry implementation. + """ + retry_after_ms = response_headers.get("retry-after-ms") + if retry_after_ms is not None: + try: + return int(retry_after_ms) / 1000 if retry_after_ms > 0 else 0 + except Exception: + pass + + retry_after = response_headers.get("retry-after") + if retry_after is None: + return None + + # Attempt to parse the header as an int. + if re.match(r"^\s*[0-9]+\s*$", retry_after): + seconds = float(retry_after) + # Fallback to parsing it as a date. 
+ else: + retry_date_tuple = email.utils.parsedate_tz(retry_after) + if retry_date_tuple is None: + return None + if retry_date_tuple[9] is None: # Python 2 + # Assume UTC if no timezone was specified + # On Python2.7, parsedate_tz returns None for a timezone offset + # instead of 0 if no timezone is given, where mktime_tz treats + # a None timezone offset as local time. + retry_date_tuple = retry_date_tuple[:9] + (0,) + retry_date_tuple[10:] + + retry_date = email.utils.mktime_tz(retry_date_tuple) + seconds = retry_date - time.time() + + if seconds < 0: + seconds = 0 + + return seconds + + +def _retry_timeout(response: httpx.Response, retries: int) -> float: + """ + Determine the amount of time to wait before retrying a request. + This function begins by trying to parse a retry-after header from the response, and then proceeds to use exponential backoff + with a jitter to determine the number of seconds to wait. + """ + + # If the API asks us to wait a certain amount of time (and it's a reasonable amount), just do what it says. + retry_after = _parse_retry_after(response.headers) + if retry_after is not None and retry_after <= MAX_RETRY_DELAY_SECONDS_FROM_HEADER: + return retry_after + + # Apply exponential backoff, capped at MAX_RETRY_DELAY_SECONDS. + retry_delay = min( + INITIAL_RETRY_DELAY_SECONDS * pow(2.0, retries), MAX_RETRY_DELAY_SECONDS + ) + + # Add a randomness / jitter to the retry delay to avoid overwhelming the server with retries. 
+ timeout = retry_delay * (1 - 0.25 * random()) + return timeout if timeout >= 0 else 0 + + +def _should_retry(response: httpx.Response) -> bool: + retriable_400s = [429, 408, 409] + return response.status_code >= 500 or response.status_code in retriable_400s + + +def remove_omit_from_dict( + original: typing.Dict[str, typing.Optional[typing.Any]], + omit: typing.Optional[typing.Any], +) -> typing.Dict[str, typing.Any]: + if omit is None: + return original + new: typing.Dict[str, typing.Any] = {} + for key, value in original.items(): + if value is not omit: + new[key] = value + return new + + +def maybe_filter_request_body( + data: typing.Optional[typing.Any], + request_options: typing.Optional[RequestOptions], + omit: typing.Optional[typing.Any], +) -> typing.Optional[typing.Any]: + if data is None: + return ( + jsonable_encoder(request_options.get("additional_body_parameters", {})) + or {} + if request_options is not None + else None + ) + elif not isinstance(data, typing.Mapping): + data_content = jsonable_encoder(data) + else: + data_content = { + **(jsonable_encoder(remove_omit_from_dict(data, omit))), # type: ignore + **( + jsonable_encoder(request_options.get("additional_body_parameters", {})) + or {} + if request_options is not None + else {} + ), + } + return data_content + + +# Abstracted out for testing purposes +def get_request_body( + *, + json: typing.Optional[typing.Any], + data: typing.Optional[typing.Any], + request_options: typing.Optional[RequestOptions], + omit: typing.Optional[typing.Any], +) -> typing.Tuple[typing.Optional[typing.Any], typing.Optional[typing.Any]]: + json_body = None + data_body = None + if data is not None: + data_body = maybe_filter_request_body(data, request_options, omit) + else: + # If both data and json are None, we send json data in the event extra properties are specified + json_body = maybe_filter_request_body(json, request_options, omit) + + # If you have an empty JSON body, you should just send None + return ( + 
class HttpClient:
    """Synchronous HTTP transport used by the generated client.

    Wraps an ``httpx.Client`` and layers on base-URL resolution, header and
    query-parameter merging from ``RequestOptions``, multipart file handling,
    and bounded retries with Retry-After-aware backoff.
    """

    def __init__(
        self,
        *,
        httpx_client: "httpx.Client",
        base_timeout: typing.Callable[[], typing.Optional[float]],
        base_headers: typing.Callable[[], typing.Dict[str, str]],
        base_url: typing.Optional[typing.Callable[[], str]] = None,
    ):
        # Defaults are callables so they are re-evaluated per request.
        self.base_url = base_url
        self.base_timeout = base_timeout
        self.base_headers = base_headers
        self.httpx_client = httpx_client

    def get_base_url(self, maybe_base_url: typing.Optional[str]) -> str:
        """Resolve the effective base URL, preferring the per-request override."""
        base_url = maybe_base_url
        if self.base_url is not None and base_url is None:
            base_url = self.base_url()

        if base_url is None:
            raise ValueError(
                "A base_url is required to make this request, please provide one and try again."
            )
        return base_url

    def request(
        self,
        path: typing.Optional[str] = None,
        *,
        method: str,
        base_url: typing.Optional[str] = None,
        params: typing.Optional[typing.Dict[str, typing.Any]] = None,
        json: typing.Optional[typing.Any] = None,
        data: typing.Optional[typing.Any] = None,
        content: typing.Optional[
            typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]]
        ] = None,
        files: typing.Optional[
            typing.Dict[str, typing.Optional[typing.Union["File", typing.List["File"]]]]
        ] = None,
        headers: typing.Optional[typing.Dict[str, typing.Any]] = None,
        request_options: typing.Optional["RequestOptions"] = None,
        retries: int = 0,
        omit: typing.Optional[typing.Any] = None,
    ) -> "httpx.Response":
        """Send a request, retrying retriable failures up to ``max_retries``."""
        base_url = self.get_base_url(base_url)
        timeout = (
            request_options.get("timeout_in_seconds")
            if request_options is not None
            and request_options.get("timeout_in_seconds") is not None
            else self.base_timeout()
        )

        json_body, data_body = get_request_body(
            json=json, data=data, request_options=request_options, omit=omit
        )

        response = self.httpx_client.request(
            method=method,
            url=urllib.parse.urljoin(f"{base_url}/", path),
            headers=jsonable_encoder(
                remove_none_from_dict(
                    {
                        **self.base_headers(),
                        **(headers if headers is not None else {}),
                        **(
                            request_options.get("additional_headers", {}) or {}
                            if request_options is not None
                            else {}
                        ),
                    }
                )
            ),
            params=encode_query(
                jsonable_encoder(
                    remove_none_from_dict(
                        remove_omit_from_dict(
                            {
                                **(params if params is not None else {}),
                                **(
                                    request_options.get(
                                        "additional_query_parameters", {}
                                    )
                                    or {}
                                    if request_options is not None
                                    else {}
                                ),
                            },
                            omit,
                        )
                    )
                )
            ),
            json=json_body,
            data=data_body,
            content=content,
            files=(
                convert_file_dict_to_httpx_tuples(
                    remove_omit_from_dict(remove_none_from_dict(files), omit)
                )
                if (files is not None and files is not omit)
                else None
            ),
            timeout=timeout,
        )

        max_retries: int = (
            request_options.get("max_retries", 0) if request_options is not None else 0
        )
        if _should_retry(response=response):
            if max_retries > retries:
                time.sleep(_retry_timeout(response=response, retries=retries))
                return self.request(
                    path=path,
                    method=method,
                    base_url=base_url,
                    params=params,
                    json=json,
                    # BUG FIX: `data` was previously dropped from the retry
                    # call, so form-encoded bodies were lost on every retry.
                    data=data,
                    content=content,
                    files=files,
                    headers=headers,
                    request_options=request_options,
                    retries=retries + 1,
                    omit=omit,
                )

        return response

    @contextmanager
    def stream(
        self,
        path: typing.Optional[str] = None,
        *,
        method: str,
        base_url: typing.Optional[str] = None,
        params: typing.Optional[typing.Dict[str, typing.Any]] = None,
        json: typing.Optional[typing.Any] = None,
        data: typing.Optional[typing.Any] = None,
        content: typing.Optional[
            typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]]
        ] = None,
        files: typing.Optional[
            typing.Dict[str, typing.Optional[typing.Union["File", typing.List["File"]]]]
        ] = None,
        headers: typing.Optional[typing.Dict[str, typing.Any]] = None,
        request_options: typing.Optional["RequestOptions"] = None,
        retries: int = 0,
        omit: typing.Optional[typing.Any] = None,
    ) -> typing.Iterator["httpx.Response"]:
        """Open a streaming request; yields the live ``httpx.Response``. No retries."""
        base_url = self.get_base_url(base_url)
        timeout = (
            request_options.get("timeout_in_seconds")
            if request_options is not None
            and request_options.get("timeout_in_seconds") is not None
            else self.base_timeout()
        )

        json_body, data_body = get_request_body(
            json=json, data=data, request_options=request_options, omit=omit
        )

        with self.httpx_client.stream(
            method=method,
            url=urllib.parse.urljoin(f"{base_url}/", path),
            headers=jsonable_encoder(
                remove_none_from_dict(
                    {
                        **self.base_headers(),
                        **(headers if headers is not None else {}),
                        **(
                            # CONSISTENCY FIX: guard with `or {}` like request()
                            # so an explicit None does not crash the unpacking.
                            request_options.get("additional_headers", {}) or {}
                            if request_options is not None
                            else {}
                        ),
                    }
                )
            ),
            params=encode_query(
                jsonable_encoder(
                    remove_none_from_dict(
                        remove_omit_from_dict(
                            {
                                **(params if params is not None else {}),
                                **(
                                    request_options.get(
                                        "additional_query_parameters", {}
                                    )
                                    or {}
                                    if request_options is not None
                                    else {}
                                ),
                            },
                            omit,
                        )
                    )
                )
            ),
            json=json_body,
            data=data_body,
            content=content,
            files=(
                convert_file_dict_to_httpx_tuples(
                    remove_omit_from_dict(remove_none_from_dict(files), omit)
                )
                if (files is not None and files is not omit)
                else None
            ),
            timeout=timeout,
        ) as stream:
            yield stream


class AsyncHttpClient:
    """Asynchronous HTTP transport; mirrors ``HttpClient`` over ``httpx.AsyncClient``."""

    def __init__(
        self,
        *,
        httpx_client: "httpx.AsyncClient",
        base_timeout: typing.Callable[[], typing.Optional[float]],
        base_headers: typing.Callable[[], typing.Dict[str, str]],
        base_url: typing.Optional[typing.Callable[[], str]] = None,
    ):
        self.base_url = base_url
        self.base_timeout = base_timeout
        self.base_headers = base_headers
        self.httpx_client = httpx_client

    def get_base_url(self, maybe_base_url: typing.Optional[str]) -> str:
        """Resolve the effective base URL, preferring the per-request override."""
        base_url = maybe_base_url
        if self.base_url is not None and base_url is None:
            base_url = self.base_url()

        if base_url is None:
            raise ValueError(
                "A base_url is required to make this request, please provide one and try again."
            )
        return base_url

    async def request(
        self,
        path: typing.Optional[str] = None,
        *,
        method: str,
        base_url: typing.Optional[str] = None,
        params: typing.Optional[typing.Dict[str, typing.Any]] = None,
        json: typing.Optional[typing.Any] = None,
        data: typing.Optional[typing.Any] = None,
        content: typing.Optional[
            typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]]
        ] = None,
        files: typing.Optional[
            typing.Dict[str, typing.Optional[typing.Union["File", typing.List["File"]]]]
        ] = None,
        headers: typing.Optional[typing.Dict[str, typing.Any]] = None,
        request_options: typing.Optional["RequestOptions"] = None,
        retries: int = 0,
        omit: typing.Optional[typing.Any] = None,
    ) -> "httpx.Response":
        """Send a request, retrying retriable failures up to ``max_retries``."""
        base_url = self.get_base_url(base_url)
        timeout = (
            request_options.get("timeout_in_seconds")
            if request_options is not None
            and request_options.get("timeout_in_seconds") is not None
            else self.base_timeout()
        )

        json_body, data_body = get_request_body(
            json=json, data=data, request_options=request_options, omit=omit
        )

        response = await self.httpx_client.request(
            method=method,
            url=urllib.parse.urljoin(f"{base_url}/", path),
            headers=jsonable_encoder(
                remove_none_from_dict(
                    {
                        **self.base_headers(),
                        **(headers if headers is not None else {}),
                        **(
                            request_options.get("additional_headers", {}) or {}
                            if request_options is not None
                            else {}
                        ),
                    }
                )
            ),
            params=encode_query(
                jsonable_encoder(
                    remove_none_from_dict(
                        remove_omit_from_dict(
                            {
                                **(params if params is not None else {}),
                                **(
                                    request_options.get(
                                        "additional_query_parameters", {}
                                    )
                                    or {}
                                    if request_options is not None
                                    else {}
                                ),
                            },
                            omit,
                        )
                    )
                )
            ),
            json=json_body,
            data=data_body,
            content=content,
            files=(
                convert_file_dict_to_httpx_tuples(
                    remove_omit_from_dict(remove_none_from_dict(files), omit)
                )
                # CONSISTENCY FIX: also skip the `omit` sentinel, as the sync
                # client does.
                if (files is not None and files is not omit)
                else None
            ),
            timeout=timeout,
        )

        max_retries: int = (
            request_options.get("max_retries", 0) if request_options is not None else 0
        )
        if _should_retry(response=response):
            if max_retries > retries:
                await asyncio.sleep(_retry_timeout(response=response, retries=retries))
                return await self.request(
                    path=path,
                    method=method,
                    base_url=base_url,
                    params=params,
                    json=json,
                    # BUG FIX: `data` was previously dropped from the retry
                    # call, so form-encoded bodies were lost on every retry.
                    data=data,
                    content=content,
                    files=files,
                    headers=headers,
                    request_options=request_options,
                    retries=retries + 1,
                    omit=omit,
                )
        return response

    @asynccontextmanager
    async def stream(
        self,
        path: typing.Optional[str] = None,
        *,
        method: str,
        base_url: typing.Optional[str] = None,
        params: typing.Optional[typing.Dict[str, typing.Any]] = None,
        json: typing.Optional[typing.Any] = None,
        data: typing.Optional[typing.Any] = None,
        content: typing.Optional[
            typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]]
        ] = None,
        files: typing.Optional[
            typing.Dict[str, typing.Optional[typing.Union["File", typing.List["File"]]]]
        ] = None,
        headers: typing.Optional[typing.Dict[str, typing.Any]] = None,
        request_options: typing.Optional["RequestOptions"] = None,
        retries: int = 0,
        omit: typing.Optional[typing.Any] = None,
    ) -> typing.AsyncIterator["httpx.Response"]:
        """Open a streaming request; yields the live ``httpx.Response``. No retries."""
        base_url = self.get_base_url(base_url)
        timeout = (
            request_options.get("timeout_in_seconds")
            if request_options is not None
            and request_options.get("timeout_in_seconds") is not None
            else self.base_timeout()
        )

        json_body, data_body = get_request_body(
            json=json, data=data, request_options=request_options, omit=omit
        )

        async with self.httpx_client.stream(
            method=method,
            url=urllib.parse.urljoin(f"{base_url}/", path),
            headers=jsonable_encoder(
                remove_none_from_dict(
                    {
                        **self.base_headers(),
                        **(headers if headers is not None else {}),
                        **(
                            # CONSISTENCY FIX: guard with `or {}` like request().
                            request_options.get("additional_headers", {}) or {}
                            if request_options is not None
                            else {}
                        ),
                    }
                )
            ),
            params=encode_query(
                jsonable_encoder(
                    remove_none_from_dict(
                        remove_omit_from_dict(
                            {
                                **(params if params is not None else {}),
                                **(
                                    request_options.get(
                                        "additional_query_parameters", {}
                                    )
                                    or {}
                                    if request_options is not None
                                    else {}
                                ),
                            },
                            omit=omit,
                        )
                    )
                )
            ),
            json=json_body,
            data=data_body,
            content=content,
            files=(
                convert_file_dict_to_httpx_tuples(
                    remove_omit_from_dict(remove_none_from_dict(files), omit)
                )
                # CONSISTENCY FIX: also skip the `omit` sentinel, as the sync
                # client does.
                if (files is not None and files is not omit)
                else None
            ),
            timeout=timeout,
        ) as stream:
            yield stream
+ +Taken from FastAPI, and made a bit simpler +https://github.com/tiangolo/fastapi/blob/master/fastapi/encoders.py +""" + +import base64 +import dataclasses +import datetime as dt +from enum import Enum +from pathlib import PurePath +from types import GeneratorType +from typing import Any, Callable, Dict, List, Optional, Set, Union + +import pydantic + +from .datetime_utils import serialize_datetime +from .pydantic_utilities import ( + IS_PYDANTIC_V2, + encode_by_type, + to_jsonable_with_fallback, +) + +SetIntStr = Set[Union[int, str]] +DictIntStrAny = Dict[Union[int, str], Any] + + +def jsonable_encoder( + obj: Any, custom_encoder: Optional[Dict[Any, Callable[[Any], Any]]] = None +) -> Any: + custom_encoder = custom_encoder or {} + if custom_encoder: + if type(obj) in custom_encoder: + return custom_encoder[type(obj)](obj) + else: + for encoder_type, encoder_instance in custom_encoder.items(): + if isinstance(obj, encoder_type): + return encoder_instance(obj) + if isinstance(obj, pydantic.BaseModel): + if IS_PYDANTIC_V2: + encoder = getattr(obj.model_config, "json_encoders", {}) # type: ignore # Pydantic v2 + else: + encoder = getattr(obj.__config__, "json_encoders", {}) # type: ignore # Pydantic v1 + if custom_encoder: + encoder.update(custom_encoder) + obj_dict = obj.dict(by_alias=True) + if "__root__" in obj_dict: + obj_dict = obj_dict["__root__"] + if "root" in obj_dict: + obj_dict = obj_dict["root"] + return jsonable_encoder(obj_dict, custom_encoder=encoder) + if dataclasses.is_dataclass(obj): + obj_dict = dataclasses.asdict(obj) # type: ignore + return jsonable_encoder(obj_dict, custom_encoder=custom_encoder) + if isinstance(obj, bytes): + return base64.b64encode(obj).decode("utf-8") + if isinstance(obj, Enum): + return obj.value + if isinstance(obj, PurePath): + return str(obj) + if isinstance(obj, (str, int, float, type(None))): + return obj + if isinstance(obj, dt.datetime): + return serialize_datetime(obj) + if isinstance(obj, dt.date): + return str(obj) 
+ if isinstance(obj, dict): + encoded_dict = {} + allowed_keys = set(obj.keys()) + for key, value in obj.items(): + if key in allowed_keys: + encoded_key = jsonable_encoder(key, custom_encoder=custom_encoder) + encoded_value = jsonable_encoder(value, custom_encoder=custom_encoder) + encoded_dict[encoded_key] = encoded_value + return encoded_dict + if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)): + encoded_list = [] + for item in obj: + encoded_list.append(jsonable_encoder(item, custom_encoder=custom_encoder)) + return encoded_list + + def fallback_serializer(o: Any) -> Any: + attempt_encode = encode_by_type(o) + if attempt_encode is not None: + return attempt_encode + + try: + data = dict(o) + except Exception as e: + errors: List[Exception] = [] + errors.append(e) + try: + data = vars(o) + except Exception as e: + errors.append(e) + raise ValueError(errors) from e + return jsonable_encoder(data, custom_encoder=custom_encoder) + + return to_jsonable_with_fallback(obj, fallback_serializer) diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/core/pydantic_utilities.py b/agenta-cli/debugging/simple-app/agenta/client/backend/core/pydantic_utilities.py new file mode 100644 index 0000000000..b4b9605ebc --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/core/pydantic_utilities.py @@ -0,0 +1,325 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +# nopycln: file +import datetime as dt +import typing +from collections import defaultdict + +import typing_extensions + +import pydantic + +from .datetime_utils import serialize_datetime +from .serialization import convert_and_respect_annotation_metadata + +IS_PYDANTIC_V2 = pydantic.VERSION.startswith("2.") + +if IS_PYDANTIC_V2: + # isort will try to reformat the comments on these imports, which breaks mypy + # isort: off + from pydantic.v1.datetime_parse import ( # type: ignore # pyright: ignore[reportMissingImports] # Pydantic v2 + parse_date as parse_date, + ) + from pydantic.v1.datetime_parse import ( # pyright: ignore[reportMissingImports] # Pydantic v2 + parse_datetime as parse_datetime, + ) + from pydantic.v1.json import ( # type: ignore # pyright: ignore[reportMissingImports] # Pydantic v2 + ENCODERS_BY_TYPE as encoders_by_type, + ) + from pydantic.v1.typing import ( # type: ignore # pyright: ignore[reportMissingImports] # Pydantic v2 + get_args as get_args, + ) + from pydantic.v1.typing import ( # pyright: ignore[reportMissingImports] # Pydantic v2 + get_origin as get_origin, + ) + from pydantic.v1.typing import ( # pyright: ignore[reportMissingImports] # Pydantic v2 + is_literal_type as is_literal_type, + ) + from pydantic.v1.typing import ( # pyright: ignore[reportMissingImports] # Pydantic v2 + is_union as is_union, + ) + from pydantic.v1.fields import ModelField as ModelField # type: ignore # pyright: ignore[reportMissingImports] # Pydantic v2 +else: + from pydantic.datetime_parse import parse_date as parse_date # type: ignore # Pydantic v1 + from pydantic.datetime_parse import parse_datetime as parse_datetime # type: ignore # Pydantic v1 + from pydantic.fields import ModelField as ModelField # type: ignore # Pydantic v1 + from pydantic.json import ENCODERS_BY_TYPE as encoders_by_type # type: ignore # Pydantic v1 + from pydantic.typing import get_args as get_args # type: ignore # Pydantic v1 + from pydantic.typing import get_origin as get_origin # 
type: ignore # Pydantic v1 + from pydantic.typing import is_literal_type as is_literal_type # type: ignore # Pydantic v1 + from pydantic.typing import is_union as is_union # type: ignore # Pydantic v1 + + # isort: on + + +T = typing.TypeVar("T") +Model = typing.TypeVar("Model", bound=pydantic.BaseModel) + + +def parse_obj_as(type_: typing.Type[T], object_: typing.Any) -> T: + dealiased_object = convert_and_respect_annotation_metadata( + object_=object_, annotation=type_, direction="read" + ) + if IS_PYDANTIC_V2: + adapter = pydantic.TypeAdapter(type_) # type: ignore # Pydantic v2 + return adapter.validate_python(dealiased_object) + else: + return pydantic.parse_obj_as(type_, dealiased_object) + + +def to_jsonable_with_fallback( + obj: typing.Any, fallback_serializer: typing.Callable[[typing.Any], typing.Any] +) -> typing.Any: + if IS_PYDANTIC_V2: + from pydantic_core import to_jsonable_python + + return to_jsonable_python(obj, fallback=fallback_serializer) + else: + return fallback_serializer(obj) + + +class UniversalBaseModel(pydantic.BaseModel): + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + # Allow fields begining with `model_` to be used in the model + protected_namespaces=(), + ) # type: ignore # Pydantic v2 + + @pydantic.model_serializer(mode="wrap", when_used="json") # type: ignore # Pydantic v2 + def serialize_model( + self, handler: pydantic.SerializerFunctionWrapHandler + ) -> typing.Any: # type: ignore # Pydantic v2 + serialized = handler(self) + data = { + k: serialize_datetime(v) if isinstance(v, dt.datetime) else v + for k, v in serialized.items() + } + return data + + else: + + class Config: + smart_union = True + json_encoders = {dt.datetime: serialize_datetime} + + @classmethod + def model_construct( + cls: typing.Type["Model"], + _fields_set: typing.Optional[typing.Set[str]] = None, + **values: typing.Any, + ) -> "Model": + dealiased_object = convert_and_respect_annotation_metadata( + 
object_=values, annotation=cls, direction="read" + ) + return cls.construct(_fields_set, **dealiased_object) + + @classmethod + def construct( + cls: typing.Type["Model"], + _fields_set: typing.Optional[typing.Set[str]] = None, + **values: typing.Any, + ) -> "Model": + dealiased_object = convert_and_respect_annotation_metadata( + object_=values, annotation=cls, direction="read" + ) + if IS_PYDANTIC_V2: + return super().model_construct(_fields_set, **dealiased_object) # type: ignore # Pydantic v2 + else: + return super().construct(_fields_set, **dealiased_object) + + def json(self, **kwargs: typing.Any) -> str: + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + if IS_PYDANTIC_V2: + return super().model_dump_json(**kwargs_with_defaults) # type: ignore # Pydantic v2 + else: + return super().json(**kwargs_with_defaults) + + def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: + """ + Override the default dict method to `exclude_unset` by default. This function patches + `exclude_unset` to work include fields within non-None default values. + """ + # Note: the logic here is multi-plexed given the levers exposed in Pydantic V1 vs V2 + # Pydantic V1's .dict can be extremely slow, so we do not want to call it twice. + # + # We'd ideally do the same for Pydantic V2, but it shells out to a library to serialize models + # that we have less control over, and this is less intrusive than custom serializers for now. 
+ if IS_PYDANTIC_V2: + kwargs_with_defaults_exclude_unset: typing.Any = { + **kwargs, + "by_alias": True, + "exclude_unset": True, + "exclude_none": False, + } + kwargs_with_defaults_exclude_none: typing.Any = { + **kwargs, + "by_alias": True, + "exclude_none": True, + "exclude_unset": False, + } + dict_dump = deep_union_pydantic_dicts( + super().model_dump(**kwargs_with_defaults_exclude_unset), # type: ignore # Pydantic v2 + super().model_dump(**kwargs_with_defaults_exclude_none), # type: ignore # Pydantic v2 + ) + + else: + _fields_set = self.__fields_set__.copy() + + fields = _get_model_fields(self.__class__) + for name, field in fields.items(): + if name not in _fields_set: + default = _get_field_default(field) + + # If the default values are non-null act like they've been set + # This effectively allows exclude_unset to work like exclude_none where + # the latter passes through intentionally set none values. + if default is not None or ( + "exclude_unset" in kwargs and not kwargs["exclude_unset"] + ): + _fields_set.add(name) + + if default is not None: + self.__fields_set__.add(name) + + kwargs_with_defaults_exclude_unset_include_fields: typing.Any = { + "by_alias": True, + "exclude_unset": True, + "include": _fields_set, + **kwargs, + } + + dict_dump = super().dict( + **kwargs_with_defaults_exclude_unset_include_fields + ) + + return convert_and_respect_annotation_metadata( + object_=dict_dump, annotation=self.__class__, direction="write" + ) + + +def _union_list_of_pydantic_dicts( + source: typing.List[typing.Any], destination: typing.List[typing.Any] +) -> typing.List[typing.Any]: + converted_list: typing.List[typing.Any] = [] + for i, item in enumerate(source): + destination_value = destination[i] # type: ignore + if isinstance(item, dict): + converted_list.append(deep_union_pydantic_dicts(item, destination_value)) + elif isinstance(item, list): + converted_list.append( + _union_list_of_pydantic_dicts(item, destination_value) + ) + else: + 
converted_list.append(item) + return converted_list + + +def deep_union_pydantic_dicts( + source: typing.Dict[str, typing.Any], destination: typing.Dict[str, typing.Any] +) -> typing.Dict[str, typing.Any]: + for key, value in source.items(): + node = destination.setdefault(key, {}) + if isinstance(value, dict): + deep_union_pydantic_dicts(value, node) + # Note: we do not do this same processing for sets given we do not have sets of models + # and given the sets are unordered, the processing of the set and matching objects would + # be non-trivial. + elif isinstance(value, list): + destination[key] = _union_list_of_pydantic_dicts(value, node) + else: + destination[key] = value + + return destination + + +if IS_PYDANTIC_V2: + + class V2RootModel(UniversalBaseModel, pydantic.RootModel): # type: ignore # Pydantic v2 + pass + + UniversalRootModel: typing_extensions.TypeAlias = V2RootModel # type: ignore +else: + UniversalRootModel: typing_extensions.TypeAlias = UniversalBaseModel # type: ignore + + +def encode_by_type(o: typing.Any) -> typing.Any: + encoders_by_class_tuples: typing.Dict[ + typing.Callable[[typing.Any], typing.Any], typing.Tuple[typing.Any, ...] 
+ ] = defaultdict(tuple) + for type_, encoder in encoders_by_type.items(): + encoders_by_class_tuples[encoder] += (type_,) + + if type(o) in encoders_by_type: + return encoders_by_type[type(o)](o) + for encoder, classes_tuple in encoders_by_class_tuples.items(): + if isinstance(o, classes_tuple): + return encoder(o) + + +def update_forward_refs(model: typing.Type["Model"], **localns: typing.Any) -> None: + if IS_PYDANTIC_V2: + model.model_rebuild(raise_errors=False) # type: ignore # Pydantic v2 + else: + model.update_forward_refs(**localns) + + +# Mirrors Pydantic's internal typing +AnyCallable = typing.Callable[..., typing.Any] + + +def universal_root_validator( + pre: bool = False, +) -> typing.Callable[[AnyCallable], AnyCallable]: + def decorator(func: AnyCallable) -> AnyCallable: + if IS_PYDANTIC_V2: + return pydantic.model_validator(mode="before" if pre else "after")(func) # type: ignore # Pydantic v2 + else: + return pydantic.root_validator(pre=pre)(func) # type: ignore # Pydantic v1 + + return decorator + + +def universal_field_validator( + field_name: str, pre: bool = False +) -> typing.Callable[[AnyCallable], AnyCallable]: + def decorator(func: AnyCallable) -> AnyCallable: + if IS_PYDANTIC_V2: + return pydantic.field_validator( + field_name, mode="before" if pre else "after" + )( + func + ) # type: ignore # Pydantic v2 + else: + return pydantic.validator(field_name, pre=pre)(func) # type: ignore # Pydantic v1 + + return decorator + + +PydanticField = typing.Union[ModelField, pydantic.fields.FieldInfo] + + +def _get_model_fields( + model: typing.Type["Model"], +) -> typing.Mapping[str, PydanticField]: + if IS_PYDANTIC_V2: + return model.model_fields # type: ignore # Pydantic v2 + else: + return model.__fields__ # type: ignore # Pydantic v1 + + +def _get_field_default(field: PydanticField) -> typing.Any: + try: + value = field.get_default() # type: ignore # Pydantic < v1.10.15 + except: + value = field.default + if IS_PYDANTIC_V2: + from pydantic_core import 
PydanticUndefined + + if value == PydanticUndefined: + return None + return value + return value diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/core/query_encoder.py b/agenta-cli/debugging/simple-app/agenta/client/backend/core/query_encoder.py new file mode 100644 index 0000000000..03fbf59bd1 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/core/query_encoder.py @@ -0,0 +1,60 @@ +# This file was auto-generated by Fern from our API Definition. + +from typing import Any, Dict, List, Optional, Tuple + +import pydantic + + +# Flattens dicts to be of the form {"key[subkey][subkey2]": value} where value is not a dict +def traverse_query_dict( + dict_flat: Dict[str, Any], key_prefix: Optional[str] = None +) -> List[Tuple[str, Any]]: + result = [] + for k, v in dict_flat.items(): + key = f"{key_prefix}[{k}]" if key_prefix is not None else k + if isinstance(v, dict): + result.extend(traverse_query_dict(v, key)) + elif isinstance(v, list): + for arr_v in v: + if isinstance(arr_v, dict): + result.extend(traverse_query_dict(arr_v, key)) + else: + result.append((key, arr_v)) + else: + result.append((key, v)) + return result + + +def single_query_encoder(query_key: str, query_value: Any) -> List[Tuple[str, Any]]: + if isinstance(query_value, pydantic.BaseModel) or isinstance(query_value, dict): + if isinstance(query_value, pydantic.BaseModel): + obj_dict = query_value.dict(by_alias=True) + else: + obj_dict = query_value + return traverse_query_dict(obj_dict, query_key) + elif isinstance(query_value, list): + encoded_values: List[Tuple[str, Any]] = [] + for value in query_value: + if isinstance(value, pydantic.BaseModel) or isinstance(value, dict): + if isinstance(value, pydantic.BaseModel): + obj_dict = value.dict(by_alias=True) + elif isinstance(value, dict): + obj_dict = value + + encoded_values.extend(single_query_encoder(query_key, obj_dict)) + else: + encoded_values.append((query_key, value)) + + return encoded_values + + return 
[(query_key, query_value)] + + +def encode_query(query: Optional[Dict[str, Any]]) -> Optional[List[Tuple[str, Any]]]: + if query is None: + return None + + encoded_query = [] + for k, v in query.items(): + encoded_query.extend(single_query_encoder(k, v)) + return encoded_query diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/core/remove_none_from_dict.py b/agenta-cli/debugging/simple-app/agenta/client/backend/core/remove_none_from_dict.py new file mode 100644 index 0000000000..c2298143f1 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/core/remove_none_from_dict.py @@ -0,0 +1,11 @@ +# This file was auto-generated by Fern from our API Definition. + +from typing import Any, Dict, Mapping, Optional + + +def remove_none_from_dict(original: Mapping[str, Optional[Any]]) -> Dict[str, Any]: + new: Dict[str, Any] = {} + for key, value in original.items(): + if value is not None: + new[key] = value + return new diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/core/request_options.py b/agenta-cli/debugging/simple-app/agenta/client/backend/core/request_options.py new file mode 100644 index 0000000000..1b38804432 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/core/request_options.py @@ -0,0 +1,35 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +try: + from typing import NotRequired # type: ignore +except ImportError: + from typing_extensions import NotRequired + + +class RequestOptions(typing.TypedDict, total=False): + """ + Additional options for request-specific configuration when calling APIs via the SDK. + This is used primarily as an optional final parameter for service functions. + + Attributes: + - timeout_in_seconds: int. The number of seconds to await an API call before timing out. + + - max_retries: int. The max number of retries to attempt if the API call fails. + + - additional_headers: typing.Dict[str, typing.Any]. 
A dictionary containing additional parameters to spread into the request's header dict + + - additional_query_parameters: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's query parameters dict + + - additional_body_parameters: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's body parameters dict + + - chunk_size: int. The size, in bytes, to process each chunk of data being streamed back within the response. This equates to leveraging `chunk_size` within `requests` or `httpx`, and is only leveraged for file downloads. + """ + + timeout_in_seconds: NotRequired[int] + max_retries: NotRequired[int] + additional_headers: NotRequired[typing.Dict[str, typing.Any]] + additional_query_parameters: NotRequired[typing.Dict[str, typing.Any]] + additional_body_parameters: NotRequired[typing.Dict[str, typing.Any]] + chunk_size: NotRequired[int] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/core/serialization.py b/agenta-cli/debugging/simple-app/agenta/client/backend/core/serialization.py new file mode 100644 index 0000000000..5679deb8a5 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/core/serialization.py @@ -0,0 +1,276 @@ +# This file was auto-generated by Fern from our API Definition. + +import collections +import inspect +import typing + +import typing_extensions + +import pydantic + + +class FieldMetadata: + """ + Metadata class used to annotate fields to provide additional information. 
+ + Example: + class MyDict(TypedDict): + field: typing.Annotated[str, FieldMetadata(alias="field_name")] + + Will serialize: `{"field": "value"}` + To: `{"field_name": "value"}` + """ + + alias: str + + def __init__(self, *, alias: str) -> None: + self.alias = alias + + +def convert_and_respect_annotation_metadata( + *, + object_: typing.Any, + annotation: typing.Any, + inner_type: typing.Optional[typing.Any] = None, + direction: typing.Literal["read", "write"], +) -> typing.Any: + """ + Respect the metadata annotations on a field, such as aliasing. This function effectively + manipulates the dict-form of an object to respect the metadata annotations. This is primarily used for + TypedDicts, which cannot support aliasing out of the box, and can be extended for additional + utilities, such as defaults. + + Parameters + ---------- + object_ : typing.Any + + annotation : type + The type we're looking to apply typing annotations from + + inner_type : typing.Optional[type] + + Returns + ------- + typing.Any + """ + + if object_ is None: + return None + if inner_type is None: + inner_type = annotation + + clean_type = _remove_annotations(inner_type) + # Pydantic models + if ( + inspect.isclass(clean_type) + and issubclass(clean_type, pydantic.BaseModel) + and isinstance(object_, typing.Mapping) + ): + return _convert_mapping(object_, clean_type, direction) + # TypedDicts + if typing_extensions.is_typeddict(clean_type) and isinstance( + object_, typing.Mapping + ): + return _convert_mapping(object_, clean_type, direction) + + if ( + typing_extensions.get_origin(clean_type) == typing.Dict + or typing_extensions.get_origin(clean_type) == dict + or clean_type == typing.Dict + ) and isinstance(object_, typing.Dict): + key_type = typing_extensions.get_args(clean_type)[0] + value_type = typing_extensions.get_args(clean_type)[1] + + return { + key: convert_and_respect_annotation_metadata( + object_=value, + annotation=annotation, + inner_type=value_type, + direction=direction, 
+ ) + for key, value in object_.items() + } + + # If you're iterating on a string, do not bother to coerce it to a sequence. + if not isinstance(object_, str): + if ( + typing_extensions.get_origin(clean_type) == typing.Set + or typing_extensions.get_origin(clean_type) == set + or clean_type == typing.Set + ) and isinstance(object_, typing.Set): + inner_type = typing_extensions.get_args(clean_type)[0] + return { + convert_and_respect_annotation_metadata( + object_=item, + annotation=annotation, + inner_type=inner_type, + direction=direction, + ) + for item in object_ + } + elif ( + ( + typing_extensions.get_origin(clean_type) == typing.List + or typing_extensions.get_origin(clean_type) == list + or clean_type == typing.List + ) + and isinstance(object_, typing.List) + ) or ( + ( + typing_extensions.get_origin(clean_type) == typing.Sequence + or typing_extensions.get_origin(clean_type) == collections.abc.Sequence + or clean_type == typing.Sequence + ) + and isinstance(object_, typing.Sequence) + ): + inner_type = typing_extensions.get_args(clean_type)[0] + return [ + convert_and_respect_annotation_metadata( + object_=item, + annotation=annotation, + inner_type=inner_type, + direction=direction, + ) + for item in object_ + ] + + if typing_extensions.get_origin(clean_type) == typing.Union: + # We should be able to ~relatively~ safely try to convert keys against all + # member types in the union, the edge case here is if one member aliases a field + # of the same name to a different name from another member + # Or if another member aliases a field of the same name that another member does not. 
+ for member in typing_extensions.get_args(clean_type): + object_ = convert_and_respect_annotation_metadata( + object_=object_, + annotation=annotation, + inner_type=member, + direction=direction, + ) + return object_ + + annotated_type = _get_annotation(annotation) + if annotated_type is None: + return object_ + + # If the object is not a TypedDict, a Union, or other container (list, set, sequence, etc.) + # Then we can safely call it on the recursive conversion. + return object_ + + +def _convert_mapping( + object_: typing.Mapping[str, object], + expected_type: typing.Any, + direction: typing.Literal["read", "write"], +) -> typing.Mapping[str, object]: + converted_object: typing.Dict[str, object] = {} + annotations = typing_extensions.get_type_hints(expected_type, include_extras=True) + aliases_to_field_names = _get_alias_to_field_name(annotations) + for key, value in object_.items(): + if direction == "read" and key in aliases_to_field_names: + dealiased_key = aliases_to_field_names.get(key) + if dealiased_key is not None: + type_ = annotations.get(dealiased_key) + else: + type_ = annotations.get(key) + # Note you can't get the annotation by the field name if you're in read mode, so you must check the aliases map + # + # So this is effectively saying if we're in write mode, and we don't have a type, or if we're in read mode and we don't have an alias + # then we can just pass the value through as is + if type_ is None: + converted_object[key] = value + elif direction == "read" and key not in aliases_to_field_names: + converted_object[key] = convert_and_respect_annotation_metadata( + object_=value, annotation=type_, direction=direction + ) + else: + converted_object[ + _alias_key(key, type_, direction, aliases_to_field_names) + ] = convert_and_respect_annotation_metadata( + object_=value, annotation=type_, direction=direction + ) + return converted_object + + +def _get_annotation(type_: typing.Any) -> typing.Optional[typing.Any]: + maybe_annotated_type = 
typing_extensions.get_origin(type_) + if maybe_annotated_type is None: + return None + + if maybe_annotated_type == typing_extensions.NotRequired: + type_ = typing_extensions.get_args(type_)[0] + maybe_annotated_type = typing_extensions.get_origin(type_) + + if maybe_annotated_type == typing_extensions.Annotated: + return type_ + + return None + + +def _remove_annotations(type_: typing.Any) -> typing.Any: + maybe_annotated_type = typing_extensions.get_origin(type_) + if maybe_annotated_type is None: + return type_ + + if maybe_annotated_type == typing_extensions.NotRequired: + return _remove_annotations(typing_extensions.get_args(type_)[0]) + + if maybe_annotated_type == typing_extensions.Annotated: + return _remove_annotations(typing_extensions.get_args(type_)[0]) + + return type_ + + +def get_alias_to_field_mapping(type_: typing.Any) -> typing.Dict[str, str]: + annotations = typing_extensions.get_type_hints(type_, include_extras=True) + return _get_alias_to_field_name(annotations) + + +def get_field_to_alias_mapping(type_: typing.Any) -> typing.Dict[str, str]: + annotations = typing_extensions.get_type_hints(type_, include_extras=True) + return _get_field_to_alias_name(annotations) + + +def _get_alias_to_field_name( + field_to_hint: typing.Dict[str, typing.Any], +) -> typing.Dict[str, str]: + aliases = {} + for field, hint in field_to_hint.items(): + maybe_alias = _get_alias_from_type(hint) + if maybe_alias is not None: + aliases[maybe_alias] = field + return aliases + + +def _get_field_to_alias_name( + field_to_hint: typing.Dict[str, typing.Any], +) -> typing.Dict[str, str]: + aliases = {} + for field, hint in field_to_hint.items(): + maybe_alias = _get_alias_from_type(hint) + if maybe_alias is not None: + aliases[field] = maybe_alias + return aliases + + +def _get_alias_from_type(type_: typing.Any) -> typing.Optional[str]: + maybe_annotated_type = _get_annotation(type_) + + if maybe_annotated_type is not None: + # The actual annotations are 1 onward, the first 
is the annotated type + annotations = typing_extensions.get_args(maybe_annotated_type)[1:] + + for annotation in annotations: + if isinstance(annotation, FieldMetadata) and annotation.alias is not None: + return annotation.alias + return None + + +def _alias_key( + key: str, + type_: typing.Any, + direction: typing.Literal["read", "write"], + aliases_to_field_names: typing.Dict[str, str], +) -> str: + if direction == "read": + return aliases_to_field_names.get(key, key) + return _get_alias_from_type(type_=type_) or key diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/environments/__init__.py b/agenta-cli/debugging/simple-app/agenta/client/backend/environments/__init__.py new file mode 100644 index 0000000000..67a41b2742 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/environments/__init__.py @@ -0,0 +1 @@ +# This file was auto-generated by Fern from our API Definition. diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/environments/client.py b/agenta-cli/debugging/simple-app/agenta/client/backend/environments/client.py new file mode 100644 index 0000000000..1cfbb07010 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/environments/client.py @@ -0,0 +1,190 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ..core.client_wrapper import SyncClientWrapper +from ..core.request_options import RequestOptions +from ..core.pydantic_utilities import parse_obj_as +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) 
class EnvironmentsClient:
    """Synchronous client for the ``environments`` API endpoints."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        # All HTTP traffic is delegated to the shared wrapped httpx client.
        self._client_wrapper = client_wrapper

    def deploy_to_environment(
        self,
        *,
        environment_name: str,
        variant_id: str,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[typing.Any]:
        """
        Deploys a given variant to an environment

        Parameters
        ----------
        environment_name : str
            Name of the environment to deploy to.

        variant_id : str
            Identifier of the variant being deployed.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            Successful Response

        Raises
        ------
        UnprocessableEntityError
            If the server rejects the payload (HTTP 422).
        ApiError
            For any other non-2xx response, or a non-JSON body.

        Examples
        --------
        from agenta import AgentaApi

        client = AgentaApi(
            api_key="YOUR_API_KEY",
            base_url="https://yourhost.com/path/to/api",
        )
        client.environments.deploy_to_environment(
            environment_name="environment_name",
            variant_id="variant_id",
        )
        """
        payload = {
            "environment_name": environment_name,
            "variant_id": variant_id,
        }
        response = self._client_wrapper.httpx_client.request(
            "environments/deploy",
            method="POST",
            json=payload,
            request_options=request_options,
            omit=OMIT,
        )
        try:
            status = response.status_code
            if 200 <= status < 300:
                parsed = parse_obj_as(
                    type_=typing.Optional[typing.Any],  # type: ignore
                    object_=response.json(),
                )
                return typing.cast(typing.Optional[typing.Any], parsed)
            if status == 422:
                raise UnprocessableEntityError(
                    typing.cast(
                        HttpValidationError,
                        parse_obj_as(
                            type_=HttpValidationError,  # type: ignore
                            object_=response.json(),
                        ),
                    )
                )
            error_body = response.json()
        except JSONDecodeError:
            # Body was not valid JSON: surface the raw text instead.
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)


class AsyncEnvironmentsClient:
    """Asynchronous client for the ``environments`` API endpoints."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        # All HTTP traffic is delegated to the shared wrapped httpx client.
        self._client_wrapper = client_wrapper

    async def deploy_to_environment(
        self,
        *,
        environment_name: str,
        variant_id: str,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[typing.Any]:
        """
        Deploys a given variant to an environment

        Parameters
        ----------
        environment_name : str
            Name of the environment to deploy to.

        variant_id : str
            Identifier of the variant being deployed.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            Successful Response

        Raises
        ------
        UnprocessableEntityError
            If the server rejects the payload (HTTP 422).
        ApiError
            For any other non-2xx response, or a non-JSON body.

        Examples
        --------
        import asyncio

        from agenta import AsyncAgentaApi

        client = AsyncAgentaApi(
            api_key="YOUR_API_KEY",
            base_url="https://yourhost.com/path/to/api",
        )


        async def main() -> None:
            await client.environments.deploy_to_environment(
                environment_name="environment_name",
                variant_id="variant_id",
            )


        asyncio.run(main())
        """
        payload = {
            "environment_name": environment_name,
            "variant_id": variant_id,
        }
        response = await self._client_wrapper.httpx_client.request(
            "environments/deploy",
            method="POST",
            json=payload,
            request_options=request_options,
            omit=OMIT,
        )
        try:
            status = response.status_code
            if 200 <= status < 300:
                parsed = parse_obj_as(
                    type_=typing.Optional[typing.Any],  # type: ignore
                    object_=response.json(),
                )
                return typing.cast(typing.Optional[typing.Any], parsed)
            if status == 422:
                raise UnprocessableEntityError(
                    typing.cast(
                        HttpValidationError,
                        parse_obj_as(
                            type_=HttpValidationError,  # type: ignore
                            object_=response.json(),
                        ),
                    )
                )
            error_body = response.json()
        except JSONDecodeError:
            # Body was not valid JSON: surface the raw text instead.
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)


class UnprocessableEntityError(ApiError):
    """Raised for HTTP 422 responses carrying a validation-error body."""

    def __init__(self, body: HttpValidationError):
        super().__init__(status_code=422, body=body)
# This file was auto-generated by Fern from our API Definition.

# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)


class EvaluationsClient:
    """Synchronous client for the ``evaluations`` API endpoints."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper

    def _parse_or_raise(self, response, type_):
        """Parse *response* as *type_* on 2xx; raise typed errors otherwise.

        Raises
        ------
        UnprocessableEntityError
            On HTTP 422 with a parsed ``HttpValidationError`` body.
        ApiError
            For any other non-2xx status, or when the body is not valid JSON
            (in which case the raw text is used as the error body).
        """
        try:
            if 200 <= response.status_code < 300:
                return parse_obj_as(type_=type_, object_=response.json())  # type: ignore
            if response.status_code == 422:
                raise UnprocessableEntityError(
                    typing.cast(
                        HttpValidationError,
                        parse_obj_as(
                            type_=HttpValidationError,  # type: ignore
                            object_=response.json(),
                        ),
                    )
                )
            error_json = response.json()
        except JSONDecodeError:
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_json)

    def fetch_evaluation_ids(
        self,
        *,
        resource_type: str,
        resource_ids: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.List[str]:
        """
        Fetches evaluation ids for a given resource type and id.

        Parameters
        ----------
        resource_type : str
            The type of resource for which to fetch evaluations.

        resource_ids : typing.Optional[typing.Union[str, typing.Sequence[str]]]
            The IDs of the resources for which to fetch evaluations.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.List[str]
            A list of evaluation ids.

        Raises
        ------
        UnprocessableEntityError
            If the resource_type is invalid or access is denied (HTTP 422).
        ApiError
            For any other non-2xx response.
        """
        _response = self._client_wrapper.httpx_client.request(
            "evaluations/by_resource",
            method="GET",
            params={
                "resource_type": resource_type,
                "resource_ids": resource_ids,
            },
            request_options=request_options,
        )
        return typing.cast(
            typing.List[str],
            self._parse_or_raise(_response, typing.List[str]),
        )

    def fetch_list_evaluations(
        self, *, app_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> typing.List[Evaluation]:
        """
        Fetches a list of evaluations, optionally filtered by an app ID.

        Parameters
        ----------
        app_id : str
            The app ID to filter the evaluations.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.List[Evaluation]
            A list of evaluations.
        """
        _response = self._client_wrapper.httpx_client.request(
            "evaluations",
            method="GET",
            params={
                "app_id": app_id,
            },
            request_options=request_options,
        )
        return typing.cast(
            typing.List[Evaluation],
            self._parse_or_raise(_response, typing.List[Evaluation]),
        )

    def create_evaluation(
        self,
        *,
        app_id: str,
        variant_ids: typing.Sequence[str],
        evaluators_configs: typing.Sequence[str],
        testset_id: str,
        rate_limit: LlmRunRateLimit,
        lm_providers_keys: typing.Optional[
            typing.Dict[str, typing.Optional[str]]
        ] = OMIT,
        correct_answer_column: typing.Optional[str] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.List[Evaluation]:
        """
        Creates a new comparison table document.

        Parameters
        ----------
        app_id : str

        variant_ids : typing.Sequence[str]

        evaluators_configs : typing.Sequence[str]

        testset_id : str

        rate_limit : LlmRunRateLimit

        lm_providers_keys : typing.Optional[typing.Dict[str, typing.Optional[str]]]

        correct_answer_column : typing.Optional[str]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.List[Evaluation]
            The created evaluations.
        """
        _response = self._client_wrapper.httpx_client.request(
            "evaluations",
            method="POST",
            json={
                "app_id": app_id,
                "variant_ids": variant_ids,
                "evaluators_configs": evaluators_configs,
                "testset_id": testset_id,
                # Serialize the rate-limit model honoring any field aliases.
                "rate_limit": convert_and_respect_annotation_metadata(
                    object_=rate_limit, annotation=LlmRunRateLimit, direction="write"
                ),
                "lm_providers_keys": lm_providers_keys,
                "correct_answer_column": correct_answer_column,
            },
            request_options=request_options,
            omit=OMIT,
        )
        return typing.cast(
            typing.List[Evaluation],
            self._parse_or_raise(_response, typing.List[Evaluation]),
        )

    def delete_evaluations(
        self,
        *,
        evaluations_ids: typing.Sequence[str],
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.List[str]:
        """
        Delete specific comparison tables based on their unique IDs.

        Parameters
        ----------
        evaluations_ids : typing.Sequence[str]
            The unique identifiers of the comparison tables to delete.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.List[str]
            The deleted comparison tables' IDs.
        """
        _response = self._client_wrapper.httpx_client.request(
            "evaluations",
            method="DELETE",
            json={
                "evaluations_ids": evaluations_ids,
            },
            request_options=request_options,
            omit=OMIT,
        )
        return typing.cast(
            typing.List[str],
            self._parse_or_raise(_response, typing.List[str]),
        )

    def fetch_evaluation_status(
        self,
        evaluation_id: str,
        *,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[typing.Any]:
        """
        Fetches the status of the evaluation.

        Parameters
        ----------
        evaluation_id : str
            The evaluation id.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            The evaluation status.
        """
        _response = self._client_wrapper.httpx_client.request(
            f"evaluations/{jsonable_encoder(evaluation_id)}/status",
            method="GET",
            request_options=request_options,
        )
        return typing.cast(
            typing.Optional[typing.Any],
            self._parse_or_raise(_response, typing.Optional[typing.Any]),
        )

    def fetch_evaluation_results(
        self,
        evaluation_id: str,
        *,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[typing.Any]:
        """
        Fetches the results of the evaluation.

        Parameters
        ----------
        evaluation_id : str
            The evaluation id.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            The evaluation results payload.
        """
        _response = self._client_wrapper.httpx_client.request(
            f"evaluations/{jsonable_encoder(evaluation_id)}/results",
            method="GET",
            request_options=request_options,
        )
        return typing.cast(
            typing.Optional[typing.Any],
            self._parse_or_raise(_response, typing.Optional[typing.Any]),
        )

    # NOTE(review): this method was originally also named
    # ``fetch_evaluation_scenarios`` and was silently shadowed by the
    # comparison-results overload below, making this endpoint unreachable.
    # It is renamed so both endpoints are callable; existing callers of
    # ``fetch_evaluation_scenarios`` keep the behavior they had before.
    def fetch_evaluation_scenarios_by_evaluation_id(
        self,
        evaluation_id: str,
        *,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.List[EvaluationScenario]:
        """
        Fetches evaluation scenarios for a given evaluation ID.

        Parameters
        ----------
        evaluation_id : str
            The ID of the evaluation for which to fetch scenarios.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.List[EvaluationScenario]
            A list of evaluation scenarios.

        Raises
        ------
        UnprocessableEntityError
            If the evaluation is not found or access is denied (HTTP 422).
        """
        _response = self._client_wrapper.httpx_client.request(
            f"evaluations/{jsonable_encoder(evaluation_id)}/evaluation_scenarios",
            method="GET",
            request_options=request_options,
        )
        return typing.cast(
            typing.List[EvaluationScenario],
            self._parse_or_raise(_response, typing.List[EvaluationScenario]),
        )

    def fetch_evaluation(
        self,
        evaluation_id: str,
        *,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> Evaluation:
        """
        Fetches a single evaluation based on its ID.

        Parameters
        ----------
        evaluation_id : str
            The ID of the evaluation to fetch.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        Evaluation
            The fetched evaluation.
        """
        _response = self._client_wrapper.httpx_client.request(
            f"evaluations/{jsonable_encoder(evaluation_id)}",
            method="GET",
            request_options=request_options,
        )
        return typing.cast(
            Evaluation,
            self._parse_or_raise(_response, Evaluation),
        )

    def fetch_evaluation_scenarios(
        self,
        *,
        evaluations_ids: str,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[typing.Any]:
        """
        Fetches comparison results for the given evaluation IDs.

        Parameters
        ----------
        evaluations_ids : str
            The IDs of the evaluations for which to fetch comparison results.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            The comparison results payload.

        Raises
        ------
        UnprocessableEntityError
            If an evaluation is not found or access is denied (HTTP 422).
        """
        _response = self._client_wrapper.httpx_client.request(
            "evaluations/evaluation_scenarios/comparison-results",
            method="GET",
            params={
                "evaluations_ids": evaluations_ids,
            },
            request_options=request_options,
        )
        return typing.cast(
            typing.Optional[typing.Any],
            self._parse_or_raise(_response, typing.Optional[typing.Any]),
        )
+ + Parameters + ---------- + + resource_type : str + + resource_ids : typing.Optional[typing.Union[str, typing.Sequence[str]]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[str] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluations.fetch_evaluation_ids( + resource_type="resource_type", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "evaluations/by_resource", + method="GET", + params={ + "resource_type": resource_type, + "resource_ids": resource_ids, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[str], + parse_obj_as( + type_=typing.List[str], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def fetch_list_evaluations( + self, *, app_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[Evaluation]: + """ + Fetches a list of evaluations, optionally filtered by an app ID. + + Args: + app_id (Optional[str]): An optional app ID to filter the evaluations. + + Returns: + List[Evaluation]: A list of evaluations. + + Parameters + ---------- + app_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[Evaluation] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluations.fetch_list_evaluations( + app_id="app_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "evaluations", + method="GET", + params={ + "app_id": app_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[Evaluation], + parse_obj_as( + type_=typing.List[Evaluation], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def create_evaluation( + self, + *, + app_id: str, + variant_ids: typing.Sequence[str], + evaluators_configs: typing.Sequence[str], + testset_id: str, + rate_limit: LlmRunRateLimit, + lm_providers_keys: typing.Optional[ + typing.Dict[str, typing.Optional[str]] + ] = OMIT, + correct_answer_column: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[Evaluation]: + """ + Creates a new comparison table document + Raises: + HTTPException: _description_ + Returns: + _description_ + + Parameters + ---------- + app_id : str + + variant_ids : typing.Sequence[str] + + evaluators_configs : typing.Sequence[str] + + testset_id : str + + rate_limit : LlmRunRateLimit + + lm_providers_keys : typing.Optional[typing.Dict[str, typing.Optional[str]]] + + 
correct_answer_column : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[Evaluation] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi, LlmRunRateLimit + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluations.create_evaluation( + app_id="app_id", + variant_ids=["variant_ids"], + evaluators_configs=["evaluators_configs"], + testset_id="testset_id", + rate_limit=LlmRunRateLimit( + batch_size=1, + max_retries=1, + retry_delay=1, + delay_between_batches=1, + ), + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "evaluations", + method="POST", + json={ + "app_id": app_id, + "variant_ids": variant_ids, + "evaluators_configs": evaluators_configs, + "testset_id": testset_id, + "rate_limit": convert_and_respect_annotation_metadata( + object_=rate_limit, annotation=LlmRunRateLimit, direction="write" + ), + "lm_providers_keys": lm_providers_keys, + "correct_answer_column": correct_answer_column, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[Evaluation], + parse_obj_as( + type_=typing.List[Evaluation], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_evaluations( + self, + *, + evaluations_ids: typing.Sequence[str], + request_options: 
typing.Optional[RequestOptions] = None, + ) -> typing.List[str]: + """ + Delete specific comparison tables based on their unique IDs. + + Args: + delete_evaluations (List[str]): The unique identifiers of the comparison tables to delete. + + Returns: + A list of the deleted comparison tables' IDs. + + Parameters + ---------- + evaluations_ids : typing.Sequence[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[str] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluations.delete_evaluations( + evaluations_ids=["evaluations_ids"], + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "evaluations", + method="DELETE", + json={ + "evaluations_ids": evaluations_ids, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[str], + parse_obj_as( + type_=typing.List[str], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def fetch_evaluation_status( + self, + evaluation_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Fetches the status of the evaluation. 
+ + Args: + evaluation_id (str): the evaluation id + request (Request): the request object + + Returns: + (str): the evaluation status + + Parameters + ---------- + evaluation_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluations.fetch_evaluation_status( + evaluation_id="evaluation_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(evaluation_id)}/status", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def fetch_evaluation_results( + self, + evaluation_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Fetches the results of the evaluation + + Args: + evaluation_id (str): the evaluation id + request (Request): the request object + + Returns: + _type_: _description_ + + Parameters + ---------- + evaluation_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluations.fetch_evaluation_results( + evaluation_id="evaluation_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(evaluation_id)}/results", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def fetch_evaluation_scenarios( + self, + evaluation_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[EvaluationScenario]: + """ + Fetches evaluation scenarios for a given evaluation ID. + + Arguments: + evaluation_id (str): The ID of the evaluation for which to fetch scenarios. + + Raises: + HTTPException: If the evaluation is not found or access is denied. + + Returns: + List[EvaluationScenario]: A list of evaluation scenarios. + + Parameters + ---------- + evaluation_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[EvaluationScenario] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluations.fetch_evaluation_scenarios( + evaluation_id="evaluation_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(evaluation_id)}/evaluation_scenarios", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[EvaluationScenario], + parse_obj_as( + type_=typing.List[EvaluationScenario], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def fetch_evaluation( + self, + evaluation_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> Evaluation: + """ + Fetches a single evaluation based on its ID. + + Args: + evaluation_id (str): The ID of the evaluation to fetch. + + Returns: + Evaluation: The fetched evaluation. + + Parameters + ---------- + evaluation_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + Evaluation + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluations.fetch_evaluation( + evaluation_id="evaluation_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(evaluation_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + Evaluation, + parse_obj_as( + type_=Evaluation, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def fetch_evaluation_scenarios( + self, + *, + evaluations_ids: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Fetches evaluation scenarios for a given evaluation ID. + + Arguments: + evaluation_id (str): The ID of the evaluation for which to fetch scenarios. + + Raises: + HTTPException: If the evaluation is not found or access is denied. + + Returns: + List[EvaluationScenario]: A list of evaluation scenarios. + + Parameters + ---------- + evaluations_ids : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluations.fetch_evaluation_scenarios( + evaluations_ids="evaluations_ids", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "evaluations/evaluation_scenarios/comparison-results", + method="GET", + params={ + "evaluations_ids": evaluations_ids, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/evaluators/__init__.py b/agenta-cli/debugging/simple-app/agenta/client/backend/evaluators/__init__.py new file mode 100644 index 0000000000..67a41b2742 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/evaluators/__init__.py @@ -0,0 +1 @@ +# This file was auto-generated by Fern from our API Definition. 
diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/evaluators/client.py b/agenta-cli/debugging/simple-app/agenta/client/backend/evaluators/client.py new file mode 100644 index 0000000000..29bafb0305 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/evaluators/client.py @@ -0,0 +1,1259 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ..core.client_wrapper import SyncClientWrapper +from ..core.request_options import RequestOptions +from ..types.evaluator import Evaluator +from ..core.pydantic_utilities import parse_obj_as +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..types.evaluator_mapping_output_interface import EvaluatorMappingOutputInterface +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from ..types.evaluator_output_interface import EvaluatorOutputInterface +from ..core.jsonable_encoder import jsonable_encoder +from ..types.evaluator_config import EvaluatorConfig +from ..core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class EvaluatorsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def get_evaluators_endpoint( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[Evaluator]: + """ + Endpoint to fetch a list of evaluators. + + Returns: + List[Evaluator]: A list of evaluator objects. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[Evaluator] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.evaluators.get_evaluators_endpoint() + """ + _response = self._client_wrapper.httpx_client.request( + "evaluators", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[Evaluator], + parse_obj_as( + type_=typing.List[Evaluator], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def evaluator_data_map( + self, + *, + inputs: typing.Dict[str, typing.Optional[typing.Any]], + mapping: typing.Dict[str, typing.Optional[typing.Any]], + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorMappingOutputInterface: + """ + Endpoint to map the experiment data tree to evaluator interface. + + Args: + request (Request): The request object. + payload (EvaluatorMappingInputInterface): The payload containing the request data. + + Returns: + EvaluatorMappingOutputInterface: the evaluator mapping output object + + Parameters + ---------- + inputs : typing.Dict[str, typing.Optional[typing.Any]] + + mapping : typing.Dict[str, typing.Optional[typing.Any]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + EvaluatorMappingOutputInterface + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.evaluators.evaluator_data_map( + inputs={"key": "value"}, + mapping={"key": "value"}, + ) + """ + _response = self._client_wrapper.httpx_client.request( + "evaluators/map", + method="POST", + json={ + "inputs": inputs, + "mapping": mapping, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EvaluatorMappingOutputInterface, + parse_obj_as( + type_=EvaluatorMappingOutputInterface, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def evaluator_run( + self, + evaluator_key: str, + *, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + settings: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + credentials: typing.Optional[ + typing.Dict[str, typing.Optional[typing.Any]] + ] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorOutputInterface: + """ + Endpoint to evaluate LLM app run + + Args: + request (Request): The request object. + evaluator_key (str): The key of the evaluator. + payload (EvaluatorInputInterface): The payload containing the request data. + + Returns: + result: EvaluatorOutputInterface object containing the outputs. 
+ + Parameters + ---------- + evaluator_key : str + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + + settings : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + + credentials : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluatorOutputInterface + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.evaluators.evaluator_run( + evaluator_key="evaluator_key", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(evaluator_key)}/run", + method="POST", + json={ + "inputs": inputs, + "settings": settings, + "credentials": credentials, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EvaluatorOutputInterface, + parse_obj_as( + type_=EvaluatorOutputInterface, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_evaluator_configs( + self, *, app_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[EvaluatorConfig]: + """ + Endpoint to fetch evaluator configurations for a specific app. + + Args: + app_id (str): The ID of the app. + + Returns: + List[EvaluatorConfigDB]: A list of evaluator configuration objects. 
+ + Parameters + ---------- + app_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[EvaluatorConfig] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.evaluators.get_evaluator_configs( + app_id="app_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "evaluators/configs", + method="GET", + params={ + "app_id": app_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[EvaluatorConfig], + parse_obj_as( + type_=typing.List[EvaluatorConfig], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create_new_evaluator_config( + self, + *, + app_id: str, + name: str, + evaluator_key: str, + settings_values: typing.Dict[str, typing.Optional[typing.Any]], + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorConfig: + """ + Endpoint to fetch evaluator configurations for a specific app. + + Args: + app_id (str): The ID of the app. + + Returns: + EvaluatorConfigDB: Evaluator configuration api model. + + Parameters + ---------- + app_id : str + + name : str + + evaluator_key : str + + settings_values : typing.Dict[str, typing.Optional[typing.Any]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + EvaluatorConfig + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.evaluators.create_new_evaluator_config( + app_id="app_id", + name="name", + evaluator_key="evaluator_key", + settings_values={"key": "value"}, + ) + """ + _response = self._client_wrapper.httpx_client.request( + "evaluators/configs", + method="POST", + json={ + "app_id": app_id, + "name": name, + "evaluator_key": evaluator_key, + "settings_values": settings_values, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EvaluatorConfig, + parse_obj_as( + type_=EvaluatorConfig, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_evaluator_config( + self, + evaluator_config_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorConfig: + """ + Endpoint to fetch evaluator configurations for a specific app. + + Returns: + List[EvaluatorConfigDB]: A list of evaluator configuration objects. + + Parameters + ---------- + evaluator_config_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + EvaluatorConfig + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.evaluators.get_evaluator_config( + evaluator_config_id="evaluator_config_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/configs/{jsonable_encoder(evaluator_config_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EvaluatorConfig, + parse_obj_as( + type_=EvaluatorConfig, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_evaluator_config( + self, + evaluator_config_id: str, + *, + name: typing.Optional[str] = OMIT, + evaluator_key: typing.Optional[str] = OMIT, + settings_values: typing.Optional[ + typing.Dict[str, typing.Optional[typing.Any]] + ] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorConfig: + """ + Endpoint to update evaluator configurations for a specific app. + + Returns: + List[EvaluatorConfigDB]: A list of evaluator configuration objects. + + Parameters + ---------- + evaluator_config_id : str + + name : typing.Optional[str] + + evaluator_key : typing.Optional[str] + + settings_values : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + EvaluatorConfig + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.evaluators.update_evaluator_config( + evaluator_config_id="evaluator_config_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/configs/{jsonable_encoder(evaluator_config_id)}", + method="PUT", + json={ + "name": name, + "evaluator_key": evaluator_key, + "settings_values": settings_values, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EvaluatorConfig, + parse_obj_as( + type_=EvaluatorConfig, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_evaluator_config( + self, + evaluator_config_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> bool: + """ + Endpoint to delete a specific evaluator configuration. + + Args: + evaluator_config_id (str): The unique identifier of the evaluator configuration. + + Returns: + bool: True if deletion was successful, False otherwise. + + Parameters + ---------- + evaluator_config_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + bool + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.evaluators.delete_evaluator_config( + evaluator_config_id="evaluator_config_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/configs/{jsonable_encoder(evaluator_config_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + bool, + parse_obj_as( + type_=bool, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncEvaluatorsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def get_evaluators_endpoint( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[Evaluator]: + """ + Endpoint to fetch a list of evaluators. + + Returns: + List[Evaluator]: A list of evaluator objects. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[Evaluator] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluators.get_evaluators_endpoint() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "evaluators", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[Evaluator], + parse_obj_as( + type_=typing.List[Evaluator], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def evaluator_data_map( + self, + *, + inputs: typing.Dict[str, typing.Optional[typing.Any]], + mapping: typing.Dict[str, typing.Optional[typing.Any]], + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorMappingOutputInterface: + """ + Endpoint to map the experiment data tree to evaluator interface. + + Args: + request (Request): The request object. + payload (EvaluatorMappingInputInterface): The payload containing the request data. + + Returns: + EvaluatorMappingOutputInterface: the evaluator mapping output object + + Parameters + ---------- + inputs : typing.Dict[str, typing.Optional[typing.Any]] + + mapping : typing.Dict[str, typing.Optional[typing.Any]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + EvaluatorMappingOutputInterface + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluators.evaluator_data_map( + inputs={"key": "value"}, + mapping={"key": "value"}, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "evaluators/map", + method="POST", + json={ + "inputs": inputs, + "mapping": mapping, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EvaluatorMappingOutputInterface, + parse_obj_as( + type_=EvaluatorMappingOutputInterface, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def evaluator_run( + self, + evaluator_key: str, + *, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + settings: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + credentials: typing.Optional[ + typing.Dict[str, typing.Optional[typing.Any]] + ] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorOutputInterface: + """ + Endpoint to evaluate LLM app run + + Args: + request (Request): The request object. + evaluator_key (str): The key of the evaluator. + payload (EvaluatorInputInterface): The payload containing the request data. + + Returns: + result: EvaluatorOutputInterface object containing the outputs. 
+ + Parameters + ---------- + evaluator_key : str + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + + settings : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + + credentials : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluatorOutputInterface + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluators.evaluator_run( + evaluator_key="evaluator_key", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(evaluator_key)}/run", + method="POST", + json={ + "inputs": inputs, + "settings": settings, + "credentials": credentials, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EvaluatorOutputInterface, + parse_obj_as( + type_=EvaluatorOutputInterface, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_evaluator_configs( + self, *, app_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[EvaluatorConfig]: + """ + Endpoint to fetch evaluator configurations for a specific app. + + Args: + app_id (str): The ID of the app. 
+ + Returns: + List[EvaluatorConfigDB]: A list of evaluator configuration objects. + + Parameters + ---------- + app_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[EvaluatorConfig] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluators.get_evaluator_configs( + app_id="app_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "evaluators/configs", + method="GET", + params={ + "app_id": app_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[EvaluatorConfig], + parse_obj_as( + type_=typing.List[EvaluatorConfig], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def create_new_evaluator_config( + self, + *, + app_id: str, + name: str, + evaluator_key: str, + settings_values: typing.Dict[str, typing.Optional[typing.Any]], + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorConfig: + """ + Endpoint to fetch evaluator configurations for a specific app. + + Args: + app_id (str): The ID of the app. + + Returns: + EvaluatorConfigDB: Evaluator configuration api model. 
+ + Parameters + ---------- + app_id : str + + name : str + + evaluator_key : str + + settings_values : typing.Dict[str, typing.Optional[typing.Any]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluatorConfig + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluators.create_new_evaluator_config( + app_id="app_id", + name="name", + evaluator_key="evaluator_key", + settings_values={"key": "value"}, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "evaluators/configs", + method="POST", + json={ + "app_id": app_id, + "name": name, + "evaluator_key": evaluator_key, + "settings_values": settings_values, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EvaluatorConfig, + parse_obj_as( + type_=EvaluatorConfig, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_evaluator_config( + self, + evaluator_config_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorConfig: + """ + Endpoint to fetch evaluator configurations for a specific app. + + Returns: + List[EvaluatorConfigDB]: A list of evaluator configuration objects. 
+ + Parameters + ---------- + evaluator_config_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluatorConfig + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluators.get_evaluator_config( + evaluator_config_id="evaluator_config_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/configs/{jsonable_encoder(evaluator_config_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EvaluatorConfig, + parse_obj_as( + type_=EvaluatorConfig, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_evaluator_config( + self, + evaluator_config_id: str, + *, + name: typing.Optional[str] = OMIT, + evaluator_key: typing.Optional[str] = OMIT, + settings_values: typing.Optional[ + typing.Dict[str, typing.Optional[typing.Any]] + ] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorConfig: + """ + Endpoint to update evaluator configurations for a specific app. + + Returns: + List[EvaluatorConfigDB]: A list of evaluator configuration objects. 
+ + Parameters + ---------- + evaluator_config_id : str + + name : typing.Optional[str] + + evaluator_key : typing.Optional[str] + + settings_values : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluatorConfig + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluators.update_evaluator_config( + evaluator_config_id="evaluator_config_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/configs/{jsonable_encoder(evaluator_config_id)}", + method="PUT", + json={ + "name": name, + "evaluator_key": evaluator_key, + "settings_values": settings_values, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EvaluatorConfig, + parse_obj_as( + type_=EvaluatorConfig, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_evaluator_config( + self, + evaluator_config_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> bool: + """ + Endpoint to delete a specific evaluator configuration. + + Args: + evaluator_config_id (str): The unique identifier of the evaluator configuration. + + Returns: + bool: True if deletion was successful, False otherwise. 
+ + Parameters + ---------- + evaluator_config_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + bool + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluators.delete_evaluator_config( + evaluator_config_id="evaluator_config_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/configs/{jsonable_encoder(evaluator_config_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + bool, + parse_obj_as( + type_=bool, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/observability/__init__.py b/agenta-cli/debugging/simple-app/agenta/client/backend/observability/__init__.py new file mode 100644 index 0000000000..67a41b2742 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/observability/__init__.py @@ -0,0 +1 @@ +# This file was auto-generated by Fern from our API Definition. 
diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/observability/client.py b/agenta-cli/debugging/simple-app/agenta/client/backend/observability/client.py new file mode 100644 index 0000000000..aebe134924 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/observability/client.py @@ -0,0 +1,1280 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ..core.client_wrapper import SyncClientWrapper +from ..core.request_options import RequestOptions +from ..core.pydantic_utilities import parse_obj_as +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..types.create_span import CreateSpan +from ..types.create_trace_response import CreateTraceResponse +from ..core.serialization import convert_and_respect_annotation_metadata +from ..types.with_pagination import WithPagination +from ..types.trace_detail import TraceDetail +from ..core.jsonable_encoder import jsonable_encoder +from ..types.span_detail import SpanDetail +from ..core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class ObservabilityClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def dashboard( + self, + *, + app_id: str, + time_range: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + variant: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + app_id : str + + time_range : typing.Optional[str] + + environment : typing.Optional[str] + + variant : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.observability.dashboard( + app_id="app_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "observability/dashboard/", + method="GET", + params={ + "app_id": app_id, + "timeRange": time_range, + "environment": environment, + "variant": variant, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create_traces( + self, + *, + trace: str, + spans: typing.Sequence[CreateSpan], + request_options: typing.Optional[RequestOptions] = None, + ) -> CreateTraceResponse: + """ + Parameters + ---------- + trace : str + + spans : typing.Sequence[CreateSpan] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CreateTraceResponse + Successful Response + + Examples + -------- + import datetime + + from agenta import AgentaApi, CreateSpan + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.observability.create_traces( + trace="trace", + spans=[ + CreateSpan( + id="id", + app_id="app_id", + name="name", + spankind="spankind", + status="status", + start_time=datetime.datetime.fromisoformat( + "2024-01-15 09:30:00+00:00", + ), + end_time=datetime.datetime.fromisoformat( + "2024-01-15 09:30:00+00:00", + ), + ) + ], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "observability/trace/", + method="POST", + json={ + "trace": trace, + "spans": convert_and_respect_annotation_metadata( + object_=spans, + annotation=typing.Sequence[CreateSpan], + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CreateTraceResponse, + parse_obj_as( + type_=CreateTraceResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_traces( + self, + *, + app_id: str, + page: typing.Optional[int] = None, + page_size: typing.Optional[int] = None, + type: typing.Optional[str] = None, + trace_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + variant: typing.Optional[str] = None, + created_at: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> WithPagination: + """ + Parameters + ---------- + app_id : str + + page : 
typing.Optional[int] + + page_size : typing.Optional[int] + + type : typing.Optional[str] + + trace_id : typing.Optional[str] + + environment : typing.Optional[str] + + variant : typing.Optional[str] + + created_at : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + WithPagination + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.observability.get_traces( + app_id="app_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "observability/traces/", + method="GET", + params={ + "app_id": app_id, + "page": page, + "pageSize": page_size, + "type": type, + "trace_id": trace_id, + "environment": environment, + "variant": variant, + "created_at": created_at, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + WithPagination, + parse_obj_as( + type_=WithPagination, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_traces_legacy( + self, + *, + request: typing.Sequence[str], + request_options: typing.Optional[RequestOptions] = None, + ) -> bool: + """ + Parameters + ---------- + request : typing.Sequence[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + bool + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.observability.delete_traces_legacy( + request=["string"], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "observability/traces/", + method="DELETE", + json=request, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + bool, + parse_obj_as( + type_=bool, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_trace_detail( + self, trace_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> TraceDetail: + """ + Parameters + ---------- + trace_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + TraceDetail + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.observability.get_trace_detail( + trace_id="trace_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"observability/traces/{jsonable_encoder(trace_id)}/", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + TraceDetail, + parse_obj_as( + type_=TraceDetail, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_spans_of_generation( + self, + *, + app_id: str, + page: typing.Optional[int] = None, + page_size: typing.Optional[int] = None, + type: typing.Optional[str] = None, + trace_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + variant: typing.Optional[str] = None, + created_at: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + app_id : str + + page : typing.Optional[int] + + page_size : typing.Optional[int] + + type : typing.Optional[str] + + trace_id : typing.Optional[str] + + environment : typing.Optional[str] + + variant : typing.Optional[str] + + created_at : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.observability.get_spans_of_generation( + app_id="app_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "observability/spans/", + method="GET", + params={ + "app_id": app_id, + "page": page, + "pageSize": page_size, + "type": type, + "trace_id": trace_id, + "environment": environment, + "variant": variant, + "created_at": created_at, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_spans_of_trace( + self, + *, + request: typing.Sequence[str], + request_options: typing.Optional[RequestOptions] = None, + ) -> bool: + """ + Parameters + ---------- + request : typing.Sequence[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + bool + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.observability.delete_spans_of_trace( + request=["string"], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "observability/spans/", + method="DELETE", + json=request, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + bool, + parse_obj_as( + type_=bool, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_span_of_generation( + self, + span_id: str, + *, + type: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> SpanDetail: + """ + Parameters + ---------- + span_id : str + + type : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + SpanDetail + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.observability.get_span_of_generation( + span_id="span_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"observability/spans/{jsonable_encoder(span_id)}/", + method="GET", + params={ + "type": type, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + SpanDetail, + parse_obj_as( + type_=SpanDetail, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncObservabilityClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def dashboard( + self, + *, + app_id: str, + time_range: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + variant: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + app_id : str + + time_range : typing.Optional[str] + + environment : typing.Optional[str] + + variant : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.observability.dashboard( + app_id="app_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "observability/dashboard/", + method="GET", + params={ + "app_id": app_id, + "timeRange": time_range, + "environment": environment, + "variant": variant, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def create_traces( + self, + *, + trace: str, + spans: typing.Sequence[CreateSpan], + request_options: typing.Optional[RequestOptions] = None, + ) -> CreateTraceResponse: + """ + Parameters + ---------- + trace : str + + spans : typing.Sequence[CreateSpan] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CreateTraceResponse + Successful Response + + Examples + -------- + import asyncio + import datetime + + from agenta import AsyncAgentaApi, CreateSpan + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.observability.create_traces( + trace="trace", + spans=[ + CreateSpan( + id="id", + app_id="app_id", + name="name", + spankind="spankind", + status="status", + start_time=datetime.datetime.fromisoformat( + "2024-01-15 09:30:00+00:00", + ), + end_time=datetime.datetime.fromisoformat( + "2024-01-15 09:30:00+00:00", + ), + ) + ], + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "observability/trace/", + method="POST", + json={ + "trace": trace, + "spans": convert_and_respect_annotation_metadata( + object_=spans, + annotation=typing.Sequence[CreateSpan], + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CreateTraceResponse, + parse_obj_as( + type_=CreateTraceResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_traces( + self, + *, + app_id: str, + page: typing.Optional[int] = None, + page_size: typing.Optional[int] = None, + type: typing.Optional[str] = None, + trace_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + variant: typing.Optional[str] = None, + created_at: typing.Optional[str] = None, + request_options: 
typing.Optional[RequestOptions] = None, + ) -> WithPagination: + """ + Parameters + ---------- + app_id : str + + page : typing.Optional[int] + + page_size : typing.Optional[int] + + type : typing.Optional[str] + + trace_id : typing.Optional[str] + + environment : typing.Optional[str] + + variant : typing.Optional[str] + + created_at : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + WithPagination + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.observability.get_traces( + app_id="app_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "observability/traces/", + method="GET", + params={ + "app_id": app_id, + "page": page, + "pageSize": page_size, + "type": type, + "trace_id": trace_id, + "environment": environment, + "variant": variant, + "created_at": created_at, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + WithPagination, + parse_obj_as( + type_=WithPagination, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_traces_legacy( + self, + *, + request: typing.Sequence[str], + request_options: typing.Optional[RequestOptions] = None, + ) -> bool: + """ + Parameters + ---------- + request : typing.Sequence[str] + + request_options : 
typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + bool + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.observability.delete_traces_legacy( + request=["string"], + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "observability/traces/", + method="DELETE", + json=request, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + bool, + parse_obj_as( + type_=bool, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_trace_detail( + self, trace_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> TraceDetail: + """ + Parameters + ---------- + trace_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + TraceDetail + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.observability.get_trace_detail( + trace_id="trace_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"observability/traces/{jsonable_encoder(trace_id)}/", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + TraceDetail, + parse_obj_as( + type_=TraceDetail, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_spans_of_generation( + self, + *, + app_id: str, + page: typing.Optional[int] = None, + page_size: typing.Optional[int] = None, + type: typing.Optional[str] = None, + trace_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + variant: typing.Optional[str] = None, + created_at: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + app_id : str + + page : typing.Optional[int] + + page_size : typing.Optional[int] + + type : typing.Optional[str] + + trace_id : typing.Optional[str] + + environment : typing.Optional[str] + + variant : typing.Optional[str] + + created_at : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.observability.get_spans_of_generation( + app_id="app_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "observability/spans/", + method="GET", + params={ + "app_id": app_id, + "page": page, + "pageSize": page_size, + "type": type, + "trace_id": trace_id, + "environment": environment, + "variant": variant, + "created_at": created_at, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_spans_of_trace( + self, + *, + request: typing.Sequence[str], + request_options: typing.Optional[RequestOptions] = None, + ) -> bool: + """ + Parameters + ---------- + request : typing.Sequence[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + bool + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.observability.delete_spans_of_trace( + request=["string"], + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "observability/spans/", + method="DELETE", + json=request, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + bool, + parse_obj_as( + type_=bool, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_span_of_generation( + self, + span_id: str, + *, + type: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> SpanDetail: + """ + Parameters + ---------- + span_id : str + + type : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + SpanDetail + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.observability.get_span_of_generation( + span_id="span_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"observability/spans/{jsonable_encoder(span_id)}/", + method="GET", + params={ + "type": type, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + SpanDetail, + parse_obj_as( + type_=SpanDetail, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/observability_v_1/__init__.py b/agenta-cli/debugging/simple-app/agenta/client/backend/observability_v_1/__init__.py new file mode 100644 index 0000000000..aceeca0c75 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/observability_v_1/__init__.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .types import Format, QueryTracesResponse + +__all__ = ["Format", "QueryTracesResponse"] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/observability_v_1/client.py b/agenta-cli/debugging/simple-app/agenta/client/backend/observability_v_1/client.py new file mode 100644 index 0000000000..5fa38f8c3c --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/observability_v_1/client.py @@ -0,0 +1,560 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.client_wrapper import SyncClientWrapper +import typing +from ..core.request_options import RequestOptions +from ..types.collect_status_response import CollectStatusResponse +from ..core.pydantic_utilities import parse_obj_as +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from .types.format import Format +from .types.query_traces_response import QueryTracesResponse +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from ..core.client_wrapper import AsyncClientWrapper + + +class ObservabilityV1Client: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def otlp_status( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> CollectStatusResponse: + """ + Status of OTLP endpoint. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CollectStatusResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.observability_v_1.otlp_status() + """ + _response = self._client_wrapper.httpx_client.request( + "observability/v1/otlp/traces", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CollectStatusResponse, + parse_obj_as( + type_=CollectStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def otlp_receiver( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> CollectStatusResponse: + """ + Receive traces via OTLP. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CollectStatusResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.observability_v_1.otlp_receiver() + """ + _response = self._client_wrapper.httpx_client.request( + "observability/v1/otlp/traces", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CollectStatusResponse, + parse_obj_as( + type_=CollectStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def query_traces( + self, + *, + format: typing.Optional[Format] = None, + focus: typing.Optional[str] = None, + oldest: typing.Optional[str] = None, + newest: typing.Optional[str] = None, + filtering: typing.Optional[str] = None, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + next: typing.Optional[str] = None, + stop: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> QueryTracesResponse: + """ + Query traces, with optional grouping, windowing, filtering, and pagination. + + Parameters + ---------- + format : typing.Optional[Format] + + focus : typing.Optional[str] + + oldest : typing.Optional[str] + + newest : typing.Optional[str] + + filtering : typing.Optional[str] + + page : typing.Optional[int] + + size : typing.Optional[int] + + next : typing.Optional[str] + + stop : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + QueryTracesResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.observability_v_1.query_traces() + """ + _response = self._client_wrapper.httpx_client.request( + "observability/v1/traces", + method="GET", + params={ + "format": format, + "focus": focus, + "oldest": oldest, + "newest": newest, + "filtering": filtering, + "page": page, + "size": size, + "next": next, + "stop": stop, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + QueryTracesResponse, + parse_obj_as( + type_=QueryTracesResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_traces( + self, + *, + node_id: typing.Optional[str] = None, + node_ids: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> CollectStatusResponse: + """ + Delete trace. + + Parameters + ---------- + node_id : typing.Optional[str] + + node_ids : typing.Optional[typing.Union[str, typing.Sequence[str]]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CollectStatusResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.observability_v_1.delete_traces() + """ + _response = self._client_wrapper.httpx_client.request( + "observability/v1/traces", + method="DELETE", + params={ + "node_id": node_id, + "node_ids": node_ids, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CollectStatusResponse, + parse_obj_as( + type_=CollectStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncObservabilityV1Client: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def otlp_status( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> CollectStatusResponse: + """ + Status of OTLP endpoint. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CollectStatusResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.observability_v_1.otlp_status() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "observability/v1/otlp/traces", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CollectStatusResponse, + parse_obj_as( + type_=CollectStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def otlp_receiver( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> CollectStatusResponse: + """ + Receive traces via OTLP. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CollectStatusResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.observability_v_1.otlp_receiver() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "observability/v1/otlp/traces", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CollectStatusResponse, + parse_obj_as( + type_=CollectStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def query_traces( + self, + *, + format: typing.Optional[Format] = None, + focus: typing.Optional[str] = None, + oldest: typing.Optional[str] = None, + newest: typing.Optional[str] = None, + filtering: typing.Optional[str] = None, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + next: typing.Optional[str] = None, + stop: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> QueryTracesResponse: + """ + Query traces, with optional grouping, windowing, filtering, and pagination. + + Parameters + ---------- + format : typing.Optional[Format] + + focus : typing.Optional[str] + + oldest : typing.Optional[str] + + newest : typing.Optional[str] + + filtering : typing.Optional[str] + + page : typing.Optional[int] + + size : typing.Optional[int] + + next : typing.Optional[str] + + stop : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + QueryTracesResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.observability_v_1.query_traces() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "observability/v1/traces", + method="GET", + params={ + "format": format, + "focus": focus, + "oldest": oldest, + "newest": newest, + "filtering": filtering, + "page": page, + "size": size, + "next": next, + "stop": stop, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + QueryTracesResponse, + parse_obj_as( + type_=QueryTracesResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_traces( + self, + *, + node_id: typing.Optional[str] = None, + node_ids: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> CollectStatusResponse: + """ + Delete trace. + + Parameters + ---------- + node_id : typing.Optional[str] + + node_ids : typing.Optional[typing.Union[str, typing.Sequence[str]]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CollectStatusResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.observability_v_1.delete_traces() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "observability/v1/traces", + method="DELETE", + params={ + "node_id": node_id, + "node_ids": node_ids, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CollectStatusResponse, + parse_obj_as( + type_=CollectStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/observability_v_1/types/__init__.py b/agenta-cli/debugging/simple-app/agenta/client/backend/observability_v_1/types/__init__.py new file mode 100644 index 0000000000..7303a90f08 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/observability_v_1/types/__init__.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .format import Format +from .query_traces_response import QueryTracesResponse + +__all__ = ["Format", "QueryTracesResponse"] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/observability_v_1/types/format.py b/agenta-cli/debugging/simple-app/agenta/client/backend/observability_v_1/types/format.py new file mode 100644 index 0000000000..ed6f7db216 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/observability_v_1/types/format.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +Format = typing.Union[typing.Literal["opentelemetry", "agenta"], typing.Any] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/observability_v_1/types/query_traces_response.py b/agenta-cli/debugging/simple-app/agenta/client/backend/observability_v_1/types/query_traces_response.py new file mode 100644 index 0000000000..4219a5b7e9 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/observability_v_1/types/query_traces_response.py @@ -0,0 +1,11 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...types.o_tel_spans_response import OTelSpansResponse +from ...types.agenta_nodes_response import AgentaNodesResponse +from ...types.agenta_trees_response import AgentaTreesResponse +from ...types.agenta_roots_response import AgentaRootsResponse + +QueryTracesResponse = typing.Union[ + OTelSpansResponse, AgentaNodesResponse, AgentaTreesResponse, AgentaRootsResponse +] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/testsets/__init__.py b/agenta-cli/debugging/simple-app/agenta/client/backend/testsets/__init__.py new file mode 100644 index 0000000000..67a41b2742 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/testsets/__init__.py @@ -0,0 +1 @@ +# This file was auto-generated by Fern from our API Definition. 
diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/testsets/client.py b/agenta-cli/debugging/simple-app/agenta/client/backend/testsets/client.py new file mode 100644 index 0000000000..fc10205700 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/testsets/client.py @@ -0,0 +1,1100 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ..core.client_wrapper import SyncClientWrapper +from .. import core +from ..core.request_options import RequestOptions +from ..types.test_set_simple_response import TestSetSimpleResponse +from ..core.pydantic_utilities import parse_obj_as +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..core.jsonable_encoder import jsonable_encoder +from ..types.test_set_output_response import TestSetOutputResponse +from ..core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class TestsetsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def upload_file( + self, + *, + file: core.File, + upload_type: typing.Optional[str] = OMIT, + testset_name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> TestSetSimpleResponse: + """ + Uploads a CSV or JSON file and saves its data to MongoDB. + + Args: + upload_type : Either a json or csv file. + file (UploadFile): The CSV or JSON file to upload. + testset_name (Optional): the name of the testset if provided. + + Returns: + dict: The result of the upload process. 
+ + Parameters + ---------- + file : core.File + See core.File for more documentation + + upload_type : typing.Optional[str] + + testset_name : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + TestSetSimpleResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.testsets.upload_file() + """ + _response = self._client_wrapper.httpx_client.request( + "testsets/upload", + method="POST", + data={ + "upload_type": upload_type, + "testset_name": testset_name, + }, + files={ + "file": file, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + TestSetSimpleResponse, + parse_obj_as( + type_=TestSetSimpleResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def import_testset( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> TestSetSimpleResponse: + """ + Import JSON testset data from an endpoint and save it to MongoDB. + + Args: + endpoint (str): An endpoint URL to import data from. + testset_name (str): the name of the testset if provided. + + Returns: + dict: The result of the import process. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + TestSetSimpleResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.testsets.import_testset() + """ + _response = self._client_wrapper.httpx_client.request( + "testsets/endpoint", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + TestSetSimpleResponse, + parse_obj_as( + type_=TestSetSimpleResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create_testset( + self, + *, + name: str, + csvdata: typing.Sequence[typing.Dict[str, typing.Optional[typing.Any]]], + request_options: typing.Optional[RequestOptions] = None, + ) -> TestSetSimpleResponse: + """ + Create a testset with given name, save the testset to MongoDB. + + Args: + name (str): name of the test set. + testset (Dict[str, str]): test set data. + + Returns: + str: The id of the test set created. + + Parameters + ---------- + name : str + + csvdata : typing.Sequence[typing.Dict[str, typing.Optional[typing.Any]]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + TestSetSimpleResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.testsets.create_testset( + name="name", + csvdata=[{"key": "value"}], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "testsets", + method="POST", + json={ + "name": name, + "csvdata": csvdata, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + TestSetSimpleResponse, + parse_obj_as( + type_=TestSetSimpleResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_single_testset( + self, + testset_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Fetch a specific testset in a MongoDB collection using its \_id. + + Args: + testset_id (str): The \_id of the testset to fetch. + + Returns: + The requested testset if found, else an HTTPException. + + Parameters + ---------- + testset_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.testsets.get_single_testset( + testset_id="testset_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"testsets/{jsonable_encoder(testset_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_testset( + self, + testset_id: str, + *, + name: str, + csvdata: typing.Sequence[typing.Dict[str, typing.Optional[typing.Any]]], + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Update a testset with given id, update the testset in MongoDB. + + Args: + testset_id (str): id of the test set to be updated. + csvdata (NewTestset): New data to replace the old testset. + + Returns: + str: The id of the test set updated. + + Parameters + ---------- + testset_id : str + + name : str + + csvdata : typing.Sequence[typing.Dict[str, typing.Optional[typing.Any]]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.testsets.update_testset( + testset_id="testset_id", + name="name", + csvdata=[{"key": "value"}], + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"testsets/{jsonable_encoder(testset_id)}", + method="PUT", + json={ + "name": name, + "csvdata": csvdata, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_testsets( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[TestSetOutputResponse]: + """ + Get all testsets. + + Returns: + + - A list of testset objects. + + Raises: + + - `HTTPException` with status code 404 if no testsets are found. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[TestSetOutputResponse] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.testsets.get_testsets() + """ + _response = self._client_wrapper.httpx_client.request( + "testsets", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[TestSetOutputResponse], + parse_obj_as( + type_=typing.List[TestSetOutputResponse], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_testsets( + self, + *, + testset_ids: typing.Sequence[str], + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[str]: + """ + Delete specific testsets based on their unique IDs. + + Args: + testset_ids (List[str]): The unique identifiers of the testsets to delete. + + Returns: + A list of the deleted testsets' IDs. + + Parameters + ---------- + testset_ids : typing.Sequence[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[str] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.testsets.delete_testsets( + testset_ids=["testset_ids"], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "testsets", + method="DELETE", + json={ + "testset_ids": testset_ids, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[str], + parse_obj_as( + type_=typing.List[str], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncTestsetsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def upload_file( + self, + *, + file: core.File, + upload_type: typing.Optional[str] = OMIT, + testset_name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> TestSetSimpleResponse: + """ + Uploads a CSV or JSON file and saves its data to MongoDB. + + Args: + upload_type : Either a json or csv file. + file (UploadFile): The CSV or JSON file to upload. + testset_name (Optional): the name of the testset if provided. + + Returns: + dict: The result of the upload process. + + Parameters + ---------- + file : core.File + See core.File for more documentation + + upload_type : typing.Optional[str] + + testset_name : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + TestSetSimpleResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.testsets.upload_file() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "testsets/upload", + method="POST", + data={ + "upload_type": upload_type, + "testset_name": testset_name, + }, + files={ + "file": file, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + TestSetSimpleResponse, + parse_obj_as( + type_=TestSetSimpleResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def import_testset( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> TestSetSimpleResponse: + """ + Import JSON testset data from an endpoint and save it to MongoDB. + + Args: + endpoint (str): An endpoint URL to import data from. + testset_name (str): the name of the testset if provided. + + Returns: + dict: The result of the import process. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + TestSetSimpleResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.testsets.import_testset() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "testsets/endpoint", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + TestSetSimpleResponse, + parse_obj_as( + type_=TestSetSimpleResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def create_testset( + self, + *, + name: str, + csvdata: typing.Sequence[typing.Dict[str, typing.Optional[typing.Any]]], + request_options: typing.Optional[RequestOptions] = None, + ) -> TestSetSimpleResponse: + """ + Create a testset with given name, save the testset to MongoDB. + + Args: + name (str): name of the test set. + testset (Dict[str, str]): test set data. + + Returns: + str: The id of the test set created. + + Parameters + ---------- + name : str + + csvdata : typing.Sequence[typing.Dict[str, typing.Optional[typing.Any]]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + TestSetSimpleResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.testsets.create_testset( + name="name", + csvdata=[{"key": "value"}], + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "testsets", + method="POST", + json={ + "name": name, + "csvdata": csvdata, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + TestSetSimpleResponse, + parse_obj_as( + type_=TestSetSimpleResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_single_testset( + self, + testset_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Fetch a specific testset in a MongoDB collection using its \_id. + + Args: + testset_id (str): The \_id of the testset to fetch. + + Returns: + The requested testset if found, else an HTTPException. + + Parameters + ---------- + testset_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.testsets.get_single_testset( + testset_id="testset_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"testsets/{jsonable_encoder(testset_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_testset( + self, + testset_id: str, + *, + name: str, + csvdata: typing.Sequence[typing.Dict[str, typing.Optional[typing.Any]]], + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Update a testset with given id, update the testset in MongoDB. + + Args: + testset_id (str): id of the test set to be updated. + csvdata (NewTestset): New data to replace the old testset. + + Returns: + str: The id of the test set updated. + + Parameters + ---------- + testset_id : str + + name : str + + csvdata : typing.Sequence[typing.Dict[str, typing.Optional[typing.Any]]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.testsets.update_testset( + testset_id="testset_id", + name="name", + csvdata=[{"key": "value"}], + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"testsets/{jsonable_encoder(testset_id)}", + method="PUT", + json={ + "name": name, + "csvdata": csvdata, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_testsets( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[TestSetOutputResponse]: + """ + Get all testsets. + + Returns: + + - A list of testset objects. + + Raises: + + - `HTTPException` with status code 404 if no testsets are found. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[TestSetOutputResponse] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.testsets.get_testsets() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "testsets", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[TestSetOutputResponse], + parse_obj_as( + type_=typing.List[TestSetOutputResponse], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_testsets( + self, + *, + testset_ids: typing.Sequence[str], + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[str]: + """ + Delete specific testsets based on their unique IDs. + + Args: + testset_ids (List[str]): The unique identifiers of the testsets to delete. + + Returns: + A list of the deleted testsets' IDs. + + Parameters + ---------- + testset_ids : typing.Sequence[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[str] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.testsets.delete_testsets( + testset_ids=["testset_ids"], + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "testsets", + method="DELETE", + json={ + "testset_ids": testset_ids, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[str], + parse_obj_as( + type_=typing.List[str], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/__init__.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/__init__.py new file mode 100644 index 0000000000..b10c09c61b --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/__init__.py @@ -0,0 +1,219 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .agenta_node_dto import AgentaNodeDto +from .agenta_node_dto_nodes_value import AgentaNodeDtoNodesValue +from .agenta_nodes_response import AgentaNodesResponse +from .agenta_root_dto import AgentaRootDto +from .agenta_roots_response import AgentaRootsResponse +from .agenta_tree_dto import AgentaTreeDto +from .agenta_trees_response import AgentaTreesResponse +from .aggregated_result import AggregatedResult +from .aggregated_result_evaluator_config import AggregatedResultEvaluatorConfig +from .app import App +from .app_variant_response import AppVariantResponse +from .app_variant_revision import AppVariantRevision +from .base_output import BaseOutput +from .body_import_testset import BodyImportTestset +from .collect_status_response import CollectStatusResponse +from .config_db import ConfigDb +from .config_dto import ConfigDto +from .config_response_model import ConfigResponseModel +from .correct_answer import CorrectAnswer +from .create_app_output import CreateAppOutput +from .create_span import CreateSpan +from .create_trace_response import CreateTraceResponse +from .docker_env_vars import DockerEnvVars +from .environment_output import EnvironmentOutput +from .environment_output_extended import EnvironmentOutputExtended +from .environment_revision import EnvironmentRevision +from .error import Error +from .evaluation import Evaluation +from .evaluation_scenario import EvaluationScenario +from .evaluation_scenario_input import EvaluationScenarioInput +from .evaluation_scenario_output import EvaluationScenarioOutput +from .evaluation_scenario_result import EvaluationScenarioResult +from .evaluation_scenario_score_update import EvaluationScenarioScoreUpdate +from .evaluation_status_enum import EvaluationStatusEnum +from .evaluation_type import EvaluationType +from .evaluator import Evaluator +from .evaluator_config import EvaluatorConfig +from .evaluator_mapping_output_interface import EvaluatorMappingOutputInterface +from .evaluator_output_interface import 
EvaluatorOutputInterface +from .exception_dto import ExceptionDto +from .get_config_response import GetConfigResponse +from .http_validation_error import HttpValidationError +from .human_evaluation import HumanEvaluation +from .human_evaluation_scenario import HumanEvaluationScenario +from .human_evaluation_scenario_input import HumanEvaluationScenarioInput +from .human_evaluation_scenario_output import HumanEvaluationScenarioOutput +from .human_evaluation_scenario_update import HumanEvaluationScenarioUpdate +from .human_evaluation_update import HumanEvaluationUpdate +from .image import Image +from .invite_request import InviteRequest +from .lifecycle_dto import LifecycleDto +from .link_dto import LinkDto +from .list_api_keys_response import ListApiKeysResponse +from .llm_run_rate_limit import LlmRunRateLimit +from .llm_tokens import LlmTokens +from .lm_providers_enum import LmProvidersEnum +from .new_human_evaluation import NewHumanEvaluation +from .new_testset import NewTestset +from .node_dto import NodeDto +from .node_type import NodeType +from .o_tel_context_dto import OTelContextDto +from .o_tel_event_dto import OTelEventDto +from .o_tel_extra_dto import OTelExtraDto +from .o_tel_link_dto import OTelLinkDto +from .o_tel_span_dto import OTelSpanDto +from .o_tel_span_kind import OTelSpanKind +from .o_tel_spans_response import OTelSpansResponse +from .o_tel_status_code import OTelStatusCode +from .organization import Organization +from .organization_output import OrganizationOutput +from .outputs import Outputs +from .parent_dto import ParentDto +from .permission import Permission +from .reference_dto import ReferenceDto +from .reference_request_model import ReferenceRequestModel +from .result import Result +from .root_dto import RootDto +from .score import Score +from .simple_evaluation_output import SimpleEvaluationOutput +from .span import Span +from .span_detail import SpanDetail +from .span_dto import SpanDto +from .span_dto_nodes_value import 
SpanDtoNodesValue +from .span_status_code import SpanStatusCode +from .span_variant import SpanVariant +from .status_code import StatusCode +from .status_dto import StatusDto +from .template import Template +from .template_image_info import TemplateImageInfo +from .test_set_output_response import TestSetOutputResponse +from .test_set_simple_response import TestSetSimpleResponse +from .time_dto import TimeDto +from .trace_detail import TraceDetail +from .tree_dto import TreeDto +from .tree_type import TreeType +from .update_app_output import UpdateAppOutput +from .uri import Uri +from .validation_error import ValidationError +from .validation_error_loc_item import ValidationErrorLocItem +from .variant_action import VariantAction +from .variant_action_enum import VariantActionEnum +from .with_pagination import WithPagination +from .workspace_member_response import WorkspaceMemberResponse +from .workspace_permission import WorkspacePermission +from .workspace_response import WorkspaceResponse +from .workspace_role import WorkspaceRole +from .workspace_role_response import WorkspaceRoleResponse + +__all__ = [ + "AgentaNodeDto", + "AgentaNodeDtoNodesValue", + "AgentaNodesResponse", + "AgentaRootDto", + "AgentaRootsResponse", + "AgentaTreeDto", + "AgentaTreesResponse", + "AggregatedResult", + "AggregatedResultEvaluatorConfig", + "App", + "AppVariantResponse", + "AppVariantRevision", + "BaseOutput", + "BodyImportTestset", + "CollectStatusResponse", + "ConfigDb", + "ConfigDto", + "ConfigResponseModel", + "CorrectAnswer", + "CreateAppOutput", + "CreateSpan", + "CreateTraceResponse", + "DockerEnvVars", + "EnvironmentOutput", + "EnvironmentOutputExtended", + "EnvironmentRevision", + "Error", + "Evaluation", + "EvaluationScenario", + "EvaluationScenarioInput", + "EvaluationScenarioOutput", + "EvaluationScenarioResult", + "EvaluationScenarioScoreUpdate", + "EvaluationStatusEnum", + "EvaluationType", + "Evaluator", + "EvaluatorConfig", + "EvaluatorMappingOutputInterface", + 
"EvaluatorOutputInterface", + "ExceptionDto", + "GetConfigResponse", + "HttpValidationError", + "HumanEvaluation", + "HumanEvaluationScenario", + "HumanEvaluationScenarioInput", + "HumanEvaluationScenarioOutput", + "HumanEvaluationScenarioUpdate", + "HumanEvaluationUpdate", + "Image", + "InviteRequest", + "LifecycleDto", + "LinkDto", + "ListApiKeysResponse", + "LlmRunRateLimit", + "LlmTokens", + "LmProvidersEnum", + "NewHumanEvaluation", + "NewTestset", + "NodeDto", + "NodeType", + "OTelContextDto", + "OTelEventDto", + "OTelExtraDto", + "OTelLinkDto", + "OTelSpanDto", + "OTelSpanKind", + "OTelSpansResponse", + "OTelStatusCode", + "Organization", + "OrganizationOutput", + "Outputs", + "ParentDto", + "Permission", + "ReferenceDto", + "ReferenceRequestModel", + "Result", + "RootDto", + "Score", + "SimpleEvaluationOutput", + "Span", + "SpanDetail", + "SpanDto", + "SpanDtoNodesValue", + "SpanStatusCode", + "SpanVariant", + "StatusCode", + "StatusDto", + "Template", + "TemplateImageInfo", + "TestSetOutputResponse", + "TestSetSimpleResponse", + "TimeDto", + "TraceDetail", + "TreeDto", + "TreeType", + "UpdateAppOutput", + "Uri", + "ValidationError", + "ValidationErrorLocItem", + "VariantAction", + "VariantActionEnum", + "WithPagination", + "WorkspaceMemberResponse", + "WorkspacePermission", + "WorkspaceResponse", + "WorkspaceRole", + "WorkspaceRoleResponse", +] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/agenta_node_dto.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/agenta_node_dto.py new file mode 100644 index 0000000000..8f8c933eac --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/agenta_node_dto.py @@ -0,0 +1,48 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .lifecycle_dto import LifecycleDto +from .root_dto import RootDto +from .tree_dto import TreeDto +from .node_dto import NodeDto +from .parent_dto import ParentDto +from .time_dto import TimeDto +from .status_dto import StatusDto +from .exception_dto import ExceptionDto +from .link_dto import LinkDto +from .o_tel_extra_dto import OTelExtraDto +from .agenta_node_dto_nodes_value import AgentaNodeDtoNodesValue +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class AgentaNodeDto(UniversalBaseModel): + lifecycle: typing.Optional[LifecycleDto] = None + root: RootDto + tree: TreeDto + node: NodeDto + parent: typing.Optional[ParentDto] = None + time: TimeDto + status: StatusDto + exception: typing.Optional[ExceptionDto] = None + data: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + metrics: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + meta: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + refs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + links: typing.Optional[typing.List[LinkDto]] = None + otel: typing.Optional[OTelExtraDto] = None + nodes: typing.Optional[ + typing.Dict[str, typing.Optional[AgentaNodeDtoNodesValue]] + ] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/agenta_node_dto_nodes_value.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/agenta_node_dto_nodes_value.py new file mode 100644 index 0000000000..771c4f8e9f --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/agenta_node_dto_nodes_value.py @@ -0,0 +1,6 @@ +# This file was 
auto-generated by Fern from our API Definition. + +import typing +from .span_dto import SpanDto + +AgentaNodeDtoNodesValue = typing.Union[SpanDto, typing.List[SpanDto]] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/agenta_nodes_response.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/agenta_nodes_response.py new file mode 100644 index 0000000000..37b8fea8f7 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/agenta_nodes_response.py @@ -0,0 +1,30 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +from ..core.pydantic_utilities import UniversalBaseModel +from .span_dto import SpanDto +import typing +from .agenta_node_dto import AgentaNodeDto +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class AgentaNodesResponse(UniversalBaseModel): + nodes: typing.List[AgentaNodeDto] + version: str + count: typing.Optional[int] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(SpanDto, AgentaNodesResponse=AgentaNodesResponse) diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/agenta_root_dto.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/agenta_root_dto.py new file mode 100644 index 0000000000..04e57ee7e3 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/agenta_root_dto.py @@ -0,0 +1,30 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations +from ..core.pydantic_utilities import UniversalBaseModel +from .span_dto import SpanDto +from .root_dto import RootDto +import typing +from .agenta_tree_dto import AgentaTreeDto +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class AgentaRootDto(UniversalBaseModel): + root: RootDto + trees: typing.List[AgentaTreeDto] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(SpanDto, AgentaRootDto=AgentaRootDto) diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/agenta_roots_response.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/agenta_roots_response.py new file mode 100644 index 0000000000..df3d3ba50b --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/agenta_roots_response.py @@ -0,0 +1,30 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations +from ..core.pydantic_utilities import UniversalBaseModel +from .span_dto import SpanDto +import typing +from .agenta_root_dto import AgentaRootDto +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class AgentaRootsResponse(UniversalBaseModel): + roots: typing.List[AgentaRootDto] + version: str + count: typing.Optional[int] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(SpanDto, AgentaRootsResponse=AgentaRootsResponse) diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/agenta_tree_dto.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/agenta_tree_dto.py new file mode 100644 index 0000000000..9e5ced12b5 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/agenta_tree_dto.py @@ -0,0 +1,30 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations +from ..core.pydantic_utilities import UniversalBaseModel +from .span_dto import SpanDto +from .tree_dto import TreeDto +import typing +from .agenta_node_dto import AgentaNodeDto +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class AgentaTreeDto(UniversalBaseModel): + tree: TreeDto + nodes: typing.List[AgentaNodeDto] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(SpanDto, AgentaTreeDto=AgentaTreeDto) diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/agenta_trees_response.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/agenta_trees_response.py new file mode 100644 index 0000000000..f6e80b5b51 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/agenta_trees_response.py @@ -0,0 +1,30 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations +from ..core.pydantic_utilities import UniversalBaseModel +from .span_dto import SpanDto +import typing +from .agenta_tree_dto import AgentaTreeDto +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class AgentaTreesResponse(UniversalBaseModel): + trees: typing.List[AgentaTreeDto] + version: str + count: typing.Optional[int] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(SpanDto, AgentaTreesResponse=AgentaTreesResponse) diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/aggregated_result.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/aggregated_result.py new file mode 100644 index 0000000000..808bddac66 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/aggregated_result.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .aggregated_result_evaluator_config import AggregatedResultEvaluatorConfig +from .result import Result +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class AggregatedResult(UniversalBaseModel): + evaluator_config: AggregatedResultEvaluatorConfig + result: Result + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/aggregated_result_evaluator_config.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/aggregated_result_evaluator_config.py new file mode 100644 index 0000000000..4a9069b4e8 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/aggregated_result_evaluator_config.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .evaluator_config import EvaluatorConfig + +AggregatedResultEvaluatorConfig = typing.Union[ + EvaluatorConfig, typing.Dict[str, typing.Optional[typing.Any]] +] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/app.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/app.py new file mode 100644 index 0000000000..1a16548531 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/app.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class App(UniversalBaseModel): + app_id: str + app_name: str + updated_at: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/app_variant_response.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/app_variant_response.py new file mode 100644 index 0000000000..d173208b14 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/app_variant_response.py @@ -0,0 +1,36 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class AppVariantResponse(UniversalBaseModel): + app_id: str + app_name: str + variant_id: str + variant_name: str + project_id: str + parameters: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + base_name: str + base_id: str + config_name: str + uri: typing.Optional[str] = None + revision: int + created_at: typing.Optional[str] = None + updated_at: typing.Optional[str] = None + modified_by_id: typing.Optional[str] = None + organization_id: typing.Optional[str] = None + workspace_id: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/app_variant_revision.py 
b/agenta-cli/debugging/simple-app/agenta/client/backend/types/app_variant_revision.py new file mode 100644 index 0000000000..fbb9c6c7f0 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/app_variant_revision.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .config_db import ConfigDb +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class AppVariantRevision(UniversalBaseModel): + revision: int + modified_by: str + config: ConfigDb + created_at: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/base_output.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/base_output.py new file mode 100644 index 0000000000..ba70c79376 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/base_output.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class BaseOutput(UniversalBaseModel): + base_id: str + base_name: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/body_import_testset.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/body_import_testset.py new file mode 100644 index 0000000000..12f7ad7453 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/body_import_testset.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class BodyImportTestset(UniversalBaseModel): + endpoint: typing.Optional[str] = None + testset_name: typing.Optional[str] = None + app_id: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/collect_status_response.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/collect_status_response.py new file mode 100644 index 0000000000..d52eed32ce --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/collect_status_response.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class CollectStatusResponse(UniversalBaseModel): + version: str + status: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/config_db.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/config_db.py new file mode 100644 index 0000000000..c11ef70b91 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/config_db.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ConfigDb(UniversalBaseModel): + config_name: str + parameters: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/config_dto.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/config_dto.py new file mode 100644 index 0000000000..6d012c8148 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/config_dto.py @@ -0,0 +1,32 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .reference_dto import ReferenceDto +from .lifecycle_dto import LifecycleDto +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ConfigDto(UniversalBaseModel): + params: typing.Dict[str, typing.Optional[typing.Any]] + url: typing.Optional[str] = None + application_ref: typing.Optional[ReferenceDto] = None + service_ref: typing.Optional[ReferenceDto] = None + variant_ref: typing.Optional[ReferenceDto] = None + environment_ref: typing.Optional[ReferenceDto] = None + application_lifecycle: typing.Optional[LifecycleDto] = None + service_lifecycle: typing.Optional[LifecycleDto] = None + variant_lifecycle: typing.Optional[LifecycleDto] = None + environment_lifecycle: typing.Optional[LifecycleDto] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/config_response_model.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/config_response_model.py new file mode 100644 index 0000000000..45b6ffb420 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/config_response_model.py @@ -0,0 +1,32 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .reference_dto import ReferenceDto +from .lifecycle_dto import LifecycleDto +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ConfigResponseModel(UniversalBaseModel): + params: typing.Dict[str, typing.Optional[typing.Any]] + url: typing.Optional[str] = None + application_ref: typing.Optional[ReferenceDto] = None + service_ref: typing.Optional[ReferenceDto] = None + variant_ref: typing.Optional[ReferenceDto] = None + environment_ref: typing.Optional[ReferenceDto] = None + application_lifecycle: typing.Optional[LifecycleDto] = None + service_lifecycle: typing.Optional[LifecycleDto] = None + variant_lifecycle: typing.Optional[LifecycleDto] = None + environment_lifecycle: typing.Optional[LifecycleDto] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/correct_answer.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/correct_answer.py new file mode 100644 index 0000000000..f0d8340c70 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/correct_answer.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class CorrectAnswer(UniversalBaseModel): + key: str + value: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/create_app_output.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/create_app_output.py new file mode 100644 index 0000000000..0cfa349505 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/create_app_output.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class CreateAppOutput(UniversalBaseModel): + app_id: str + app_name: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/create_span.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/create_span.py new file mode 100644 index 0000000000..edc44590ad --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/create_span.py @@ -0,0 +1,45 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .outputs import Outputs +import datetime as dt +from .llm_tokens import LlmTokens +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class CreateSpan(UniversalBaseModel): + id: str + app_id: str + project_id: typing.Optional[str] = None + variant_id: typing.Optional[str] = None + variant_name: typing.Optional[str] = None + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + internals: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + outputs: typing.Optional[Outputs] = None + config: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + environment: typing.Optional[str] = None + tags: typing.Optional[typing.List[str]] = None + token_consumption: typing.Optional[int] = None + name: str + parent_span_id: typing.Optional[str] = None + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + spankind: str + status: str + user: typing.Optional[str] = None + start_time: dt.datetime + end_time: dt.datetime + tokens: typing.Optional[LlmTokens] = None + cost: typing.Optional[float] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/create_trace_response.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/create_trace_response.py new file mode 100644 index 0000000000..41e67fa7ca --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/create_trace_response.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class CreateTraceResponse(UniversalBaseModel): + message: str + data: typing.Dict[str, typing.Optional[typing.Any]] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/docker_env_vars.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/docker_env_vars.py new file mode 100644 index 0000000000..71da4ea95b --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/docker_env_vars.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class DockerEnvVars(UniversalBaseModel): + env_vars: typing.Dict[str, str] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/environment_output.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/environment_output.py new file mode 100644 index 0000000000..57d1216bfd --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/environment_output.py @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class EnvironmentOutput(UniversalBaseModel): + name: str + app_id: str + project_id: str + deployed_app_variant_id: typing.Optional[str] = None + deployed_variant_name: typing.Optional[str] = None + deployed_app_variant_revision_id: typing.Optional[str] = None + revision: typing.Optional[int] = None + organization_id: typing.Optional[str] = None + workspace_id: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/environment_output_extended.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/environment_output_extended.py new file mode 100644 index 0000000000..1b0ef8b470 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/environment_output_extended.py @@ -0,0 +1,31 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .environment_revision import EnvironmentRevision +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class EnvironmentOutputExtended(UniversalBaseModel): + name: str + app_id: str + project_id: str + deployed_app_variant_id: typing.Optional[str] = None + deployed_variant_name: typing.Optional[str] = None + deployed_app_variant_revision_id: typing.Optional[str] = None + revision: typing.Optional[int] = None + revisions: typing.List[EnvironmentRevision] + organization_id: typing.Optional[str] = None + workspace_id: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/environment_revision.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/environment_revision.py new file mode 100644 index 0000000000..a1e2d29231 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/environment_revision.py @@ -0,0 +1,26 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class EnvironmentRevision(UniversalBaseModel): + id: str + revision: int + modified_by: str + deployed_app_variant_revision: typing.Optional[str] = None + deployment: typing.Optional[str] = None + created_at: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/error.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/error.py new file mode 100644 index 0000000000..4b47a34d06 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/error.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class Error(UniversalBaseModel): + message: str + stacktrace: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluation.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluation.py new file mode 100644 index 0000000000..75c947ee11 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluation.py @@ -0,0 +1,39 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .result import Result +from .aggregated_result import AggregatedResult +import datetime as dt +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class Evaluation(UniversalBaseModel): + id: str + app_id: str + project_id: str + variant_ids: typing.List[str] + variant_names: typing.List[str] + variant_revision_ids: typing.List[str] + revisions: typing.List[str] + testset_id: typing.Optional[str] = None + testset_name: typing.Optional[str] = None + status: Result + aggregated_results: typing.List[AggregatedResult] + average_cost: typing.Optional[Result] = None + total_cost: typing.Optional[Result] = None + average_latency: typing.Optional[Result] = None + created_at: dt.datetime + updated_at: dt.datetime + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluation_scenario.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluation_scenario.py new file mode 100644 index 0000000000..1a856e7a24 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluation_scenario.py @@ -0,0 +1,32 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .evaluation_scenario_input import EvaluationScenarioInput +from .evaluation_scenario_output import EvaluationScenarioOutput +from .correct_answer import CorrectAnswer +from .evaluation_scenario_result import EvaluationScenarioResult +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class EvaluationScenario(UniversalBaseModel): + id: typing.Optional[str] = None + evaluation_id: str + inputs: typing.List[EvaluationScenarioInput] + outputs: typing.List[EvaluationScenarioOutput] + correct_answers: typing.Optional[typing.List[CorrectAnswer]] = None + is_pinned: typing.Optional[bool] = None + note: typing.Optional[str] = None + results: typing.List[EvaluationScenarioResult] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluation_scenario_input.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluation_scenario_input.py new file mode 100644 index 0000000000..bbc89f33c0 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluation_scenario_input.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class EvaluationScenarioInput(UniversalBaseModel): + name: str + type: str + value: typing.Optional[typing.Any] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluation_scenario_output.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluation_scenario_output.py new file mode 100644 index 0000000000..4c1f489f59 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluation_scenario_output.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .result import Result +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class EvaluationScenarioOutput(UniversalBaseModel): + result: Result + cost: typing.Optional[float] = None + latency: typing.Optional[float] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluation_scenario_result.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluation_scenario_result.py new file mode 100644 index 0000000000..8bb8f3e9ea --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluation_scenario_result.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .result import Result +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class EvaluationScenarioResult(UniversalBaseModel): + evaluator_config: str + result: Result + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluation_scenario_score_update.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluation_scenario_score_update.py new file mode 100644 index 0000000000..f76d9f6d22 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluation_scenario_score_update.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class EvaluationScenarioScoreUpdate(UniversalBaseModel): + score: float + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluation_status_enum.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluation_status_enum.py new file mode 100644 index 0000000000..96aa452f19 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluation_status_enum.py @@ -0,0 +1,15 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +EvaluationStatusEnum = typing.Union[ + typing.Literal[ + "EVALUATION_INITIALIZED", + "EVALUATION_STARTED", + "EVALUATION_FINISHED", + "EVALUATION_FINISHED_WITH_ERRORS", + "EVALUATION_FAILED", + "EVALUATION_AGGREGATION_FAILED", + ], + typing.Any, +] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluation_type.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluation_type.py new file mode 100644 index 0000000000..ef63bb721d --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluation_type.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +EvaluationType = typing.Union[ + typing.Literal["human_a_b_testing", "single_model_test"], typing.Any +] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluator.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluator.py new file mode 100644 index 0000000000..bafa1ba61d --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluator.py @@ -0,0 +1,28 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class Evaluator(UniversalBaseModel): + name: str + key: str + direct_use: bool + settings_template: typing.Dict[str, typing.Optional[typing.Any]] + description: typing.Optional[str] = None + oss: typing.Optional[bool] = None + requires_llm_api_keys: typing.Optional[bool] = None + tags: typing.List[str] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluator_config.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluator_config.py new file mode 100644 index 0000000000..59fe19fc40 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluator_config.py @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class EvaluatorConfig(UniversalBaseModel): + id: str + name: str + project_id: str + evaluator_key: str + settings_values: typing.Optional[ + typing.Dict[str, typing.Optional[typing.Any]] + ] = None + created_at: str + updated_at: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluator_mapping_output_interface.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluator_mapping_output_interface.py new file mode 100644 index 0000000000..91cf2f87d4 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluator_mapping_output_interface.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class EvaluatorMappingOutputInterface(UniversalBaseModel): + outputs: typing.Dict[str, typing.Optional[typing.Any]] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluator_output_interface.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluator_output_interface.py new file mode 100644 index 0000000000..acd295c331 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/evaluator_output_interface.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class EvaluatorOutputInterface(UniversalBaseModel): + outputs: typing.Dict[str, typing.Optional[typing.Any]] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/exception_dto.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/exception_dto.py new file mode 100644 index 0000000000..a3e780d345 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/exception_dto.py @@ -0,0 +1,26 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import datetime as dt +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ExceptionDto(UniversalBaseModel): + timestamp: dt.datetime + type: str + message: typing.Optional[str] = None + stacktrace: typing.Optional[str] = None + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/get_config_response.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/get_config_response.py new file mode 100644 index 0000000000..6f65d16d13 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/get_config_response.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class GetConfigResponse(UniversalBaseModel): + config_name: str + current_version: int + parameters: typing.Dict[str, typing.Optional[typing.Any]] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/http_validation_error.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/http_validation_error.py new file mode 100644 index 0000000000..988ebed269 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/http_validation_error.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .validation_error import ValidationError +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class HttpValidationError(UniversalBaseModel): + detail: typing.Optional[typing.List[ValidationError]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/human_evaluation.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/human_evaluation.py new file mode 100644 index 0000000000..9d9ba8b490 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/human_evaluation.py @@ -0,0 +1,33 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class HumanEvaluation(UniversalBaseModel): + id: str + app_id: str + project_id: str + evaluation_type: str + variant_ids: typing.List[str] + variant_names: typing.List[str] + variants_revision_ids: typing.List[str] + revisions: typing.List[str] + testset_id: str + testset_name: str + status: str + created_at: str + updated_at: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/human_evaluation_scenario.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/human_evaluation_scenario.py new file mode 100644 index 0000000000..1321dde09b --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/human_evaluation_scenario.py @@ -0,0 +1,32 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .human_evaluation_scenario_input import HumanEvaluationScenarioInput +from .human_evaluation_scenario_output import HumanEvaluationScenarioOutput +from .score import Score +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class HumanEvaluationScenario(UniversalBaseModel): + id: typing.Optional[str] = None + evaluation_id: str + inputs: typing.List[HumanEvaluationScenarioInput] + outputs: typing.List[HumanEvaluationScenarioOutput] + vote: typing.Optional[str] = None + score: typing.Optional[Score] = None + correct_answer: typing.Optional[str] = None + is_pinned: typing.Optional[bool] = None + note: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/human_evaluation_scenario_input.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/human_evaluation_scenario_input.py new file mode 100644 index 0000000000..e2fa9b4082 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/human_evaluation_scenario_input.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class HumanEvaluationScenarioInput(UniversalBaseModel): + input_name: str + input_value: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/human_evaluation_scenario_output.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/human_evaluation_scenario_output.py new file mode 100644 index 0000000000..affac190e1 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/human_evaluation_scenario_output.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class HumanEvaluationScenarioOutput(UniversalBaseModel): + variant_id: str + variant_output: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/human_evaluation_scenario_update.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/human_evaluation_scenario_update.py new file mode 100644 index 0000000000..6bf28fd914 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/human_evaluation_scenario_update.py @@ -0,0 +1,30 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .score import Score +from .human_evaluation_scenario_output import HumanEvaluationScenarioOutput +from .human_evaluation_scenario_input import HumanEvaluationScenarioInput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class HumanEvaluationScenarioUpdate(UniversalBaseModel): + vote: typing.Optional[str] = None + score: typing.Optional[Score] = None + correct_answer: typing.Optional[str] = None + outputs: typing.Optional[typing.List[HumanEvaluationScenarioOutput]] = None + inputs: typing.Optional[typing.List[HumanEvaluationScenarioInput]] = None + is_pinned: typing.Optional[bool] = None + note: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/human_evaluation_update.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/human_evaluation_update.py new file mode 100644 index 0000000000..78a453195f --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/human_evaluation_update.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .evaluation_status_enum import EvaluationStatusEnum +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class HumanEvaluationUpdate(UniversalBaseModel): + status: typing.Optional[EvaluationStatusEnum] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/image.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/image.py new file mode 100644 index 0000000000..f8c17d044e --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/image.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class Image(UniversalBaseModel): + type: typing.Optional[str] = None + docker_id: str + tags: str + organization_id: typing.Optional[str] = None + workspace_id: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/invite_request.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/invite_request.py new file mode 100644 index 0000000000..9c16852671 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/invite_request.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class InviteRequest(UniversalBaseModel): + email: str + roles: typing.List[str] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/lifecycle_dto.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/lifecycle_dto.py new file mode 100644 index 0000000000..098f83505e --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/lifecycle_dto.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class LifecycleDto(UniversalBaseModel): + created_at: typing.Optional[str] = None + updated_at: typing.Optional[str] = None + updated_by_id: typing.Optional[str] = None + updated_by: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/link_dto.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/link_dto.py new file mode 100644 index 0000000000..91c76de759 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/link_dto.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .tree_type import TreeType +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class LinkDto(UniversalBaseModel): + type: TreeType = "invocation" + id: str + tree_id: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/list_api_keys_response.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/list_api_keys_response.py new file mode 100644 index 0000000000..0ed671ca27 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/list_api_keys_response.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ListApiKeysResponse(UniversalBaseModel): + prefix: str + created_at: str + last_used_at: typing.Optional[str] = None + expiration_date: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/llm_run_rate_limit.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/llm_run_rate_limit.py new file mode 100644 index 0000000000..b0c9f46899 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/llm_run_rate_limit.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class LlmRunRateLimit(UniversalBaseModel): + batch_size: int + max_retries: int + retry_delay: int + delay_between_batches: int + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/llm_tokens.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/llm_tokens.py new file mode 100644 index 0000000000..7336d8d561 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/llm_tokens.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class LlmTokens(UniversalBaseModel): + prompt_tokens: typing.Optional[int] = None + completion_tokens: typing.Optional[int] = None + total_tokens: typing.Optional[int] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/lm_providers_enum.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/lm_providers_enum.py new file mode 100644 index 0000000000..6aa756ba0e --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/lm_providers_enum.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +LmProvidersEnum = typing.Union[ + typing.Literal[ + "OPENAI_API_KEY", + "MISTRAL_API_KEY", + "COHERE_API_KEY", + "ANTHROPIC_API_KEY", + "ANYSCALE_API_KEY", + "PERPLEXITYAI_API_KEY", + "DEEPINFRA_API_KEY", + "TOGETHERAI_API_KEY", + "ALEPHALPHA_API_KEY", + "OPENROUTER_API_KEY", + "GROQ_API_KEY", + "GEMINI_API_KEY", + ], + typing.Any, +] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/new_human_evaluation.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/new_human_evaluation.py new file mode 100644 index 0000000000..9fde3d9e3c --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/new_human_evaluation.py @@ -0,0 +1,27 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .evaluation_type import EvaluationType +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class NewHumanEvaluation(UniversalBaseModel): + app_id: str + variant_ids: typing.List[str] + evaluation_type: EvaluationType + inputs: typing.List[str] + testset_id: str + status: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/new_testset.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/new_testset.py new file mode 100644 index 0000000000..9643d26a1f --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/new_testset.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class NewTestset(UniversalBaseModel): + name: str + csvdata: typing.List[typing.Dict[str, typing.Optional[typing.Any]]] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/node_dto.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/node_dto.py new file mode 100644 index 0000000000..6caa131c32 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/node_dto.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .node_type import NodeType +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class NodeDto(UniversalBaseModel): + id: str + name: str + type: typing.Optional[NodeType] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/node_type.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/node_type.py new file mode 100644 index 0000000000..8abbe89309 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/node_type.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +NodeType = typing.Union[ + typing.Literal[ + "agent", + "workflow", + "chain", + "task", + "tool", + "embedding", + "query", + "completion", + "chat", + "rerank", + ], + typing.Any, +] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/o_tel_context_dto.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/o_tel_context_dto.py new file mode 100644 index 0000000000..ab99bfac46 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/o_tel_context_dto.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class OTelContextDto(UniversalBaseModel): + trace_id: str + span_id: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/o_tel_event_dto.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/o_tel_event_dto.py new file mode 100644 index 0000000000..e5eed83822 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/o_tel_event_dto.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class OTelEventDto(UniversalBaseModel): + name: str + timestamp: str + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/o_tel_extra_dto.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/o_tel_extra_dto.py new file mode 100644 index 0000000000..c7e9294db3 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/o_tel_extra_dto.py @@ -0,0 +1,26 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .o_tel_event_dto import OTelEventDto +from .o_tel_link_dto import OTelLinkDto +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class OTelExtraDto(UniversalBaseModel): + kind: typing.Optional[str] = None + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + events: typing.Optional[typing.List[OTelEventDto]] = None + links: typing.Optional[typing.List[OTelLinkDto]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/o_tel_link_dto.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/o_tel_link_dto.py new file mode 100644 index 0000000000..75ec3d1f1b --- /dev/null +++ 
b/agenta-cli/debugging/simple-app/agenta/client/backend/types/o_tel_link_dto.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .o_tel_context_dto import OTelContextDto +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class OTelLinkDto(UniversalBaseModel): + context: OTelContextDto + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/o_tel_span_dto.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/o_tel_span_dto.py new file mode 100644 index 0000000000..66632172c9 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/o_tel_span_dto.py @@ -0,0 +1,37 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .o_tel_context_dto import OTelContextDto +import typing +from .o_tel_span_kind import OTelSpanKind +import datetime as dt +from .o_tel_status_code import OTelStatusCode +from .o_tel_event_dto import OTelEventDto +from .o_tel_link_dto import OTelLinkDto +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class OTelSpanDto(UniversalBaseModel): + context: OTelContextDto + name: str + kind: typing.Optional[OTelSpanKind] = None + start_time: dt.datetime + end_time: dt.datetime + status_code: typing.Optional[OTelStatusCode] = None + status_message: typing.Optional[str] = None + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + events: typing.Optional[typing.List[OTelEventDto]] = None + parent: typing.Optional[OTelContextDto] = None + links: typing.Optional[typing.List[OTelLinkDto]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/o_tel_span_kind.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/o_tel_span_kind.py new file mode 100644 index 0000000000..98ba7bf43c --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/o_tel_span_kind.py @@ -0,0 +1,15 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +OTelSpanKind = typing.Union[ + typing.Literal[ + "SPAN_KIND_UNSPECIFIED", + "SPAN_KIND_INTERNAL", + "SPAN_KIND_SERVER", + "SPAN_KIND_CLIENT", + "SPAN_KIND_PRODUCER", + "SPAN_KIND_CONSUMER", + ], + typing.Any, +] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/o_tel_spans_response.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/o_tel_spans_response.py new file mode 100644 index 0000000000..b9fb641427 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/o_tel_spans_response.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .o_tel_span_dto import OTelSpanDto +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class OTelSpansResponse(UniversalBaseModel): + version: str + count: typing.Optional[int] = None + spans: typing.List[OTelSpanDto] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/o_tel_status_code.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/o_tel_status_code.py new file mode 100644 index 0000000000..d5a60e6006 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/o_tel_status_code.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +OTelStatusCode = typing.Union[ + typing.Literal["STATUS_CODE_OK", "STATUS_CODE_ERROR", "STATUS_CODE_UNSET"], + typing.Any, +] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/organization.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/organization.py new file mode 100644 index 0000000000..c6f12ee0c7 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/organization.py @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class Organization(UniversalBaseModel): + id: str + name: str + description: str + type: typing.Optional[str] = None + owner: str + workspaces: typing.Optional[typing.List[str]] = None + members: typing.Optional[typing.List[str]] = None + invitations: typing.Optional[typing.List[typing.Optional[typing.Any]]] = None + is_paying: typing.Optional[bool] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/organization_output.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/organization_output.py new file mode 100644 index 0000000000..702802f814 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/organization_output.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class OrganizationOutput(UniversalBaseModel): + id: str + name: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/outputs.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/outputs.py new file mode 100644 index 0000000000..f719851a4d --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/outputs.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +Outputs = typing.Union[typing.Dict[str, typing.Optional[typing.Any]], typing.List[str]] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/parent_dto.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/parent_dto.py new file mode 100644 index 0000000000..7bf3c33715 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/parent_dto.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class ParentDto(UniversalBaseModel): + id: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/permission.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/permission.py new file mode 100644 index 0000000000..a32616dd16 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/permission.py @@ -0,0 +1,40 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +Permission = typing.Union[ + typing.Literal[ + "read_system", + "view_application", + "edit_application", + "create_application", + "delete_application", + "create_app_variant", + "delete_app_variant", + "modify_variant_configurations", + "delete_application_variant", + "view_app_environment_deployment", + "edit_app_environment_deployment", + "create_app_environment_deployment", + "view_testset", + "edit_testset", + "create_testset", + "delete_testset", + "view_evaluation", + "run_evaluations", + "edit_evaluation", + "create_evaluation", + "delete_evaluation", + "deploy_application", + "view_workspace", + "edit_workspace", + "create_workspace", + "delete_workspace", + "modify_user_roles", + "add_new_user_to_workspace", + "edit_organization", + "delete_organization", + "add_new_user_to_organization", + ], + typing.Any, +] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/reference_dto.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/reference_dto.py new file mode 100644 index 0000000000..67fd56b0a8 --- /dev/null +++ 
b/agenta-cli/debugging/simple-app/agenta/client/backend/types/reference_dto.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ReferenceDto(UniversalBaseModel): + slug: typing.Optional[str] = None + version: typing.Optional[int] = None + id: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/reference_request_model.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/reference_request_model.py new file mode 100644 index 0000000000..91d1ad80d5 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/reference_request_model.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ReferenceRequestModel(UniversalBaseModel): + slug: typing.Optional[str] = None + version: typing.Optional[int] = None + id: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/result.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/result.py new file mode 100644 index 0000000000..e651345b9c --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/result.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .error import Error +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class Result(UniversalBaseModel): + type: str + value: typing.Optional[typing.Optional[typing.Any]] = None + error: typing.Optional[Error] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/root_dto.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/root_dto.py new file mode 100644 index 0000000000..7b7e8f5aeb --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/root_dto.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class RootDto(UniversalBaseModel): + id: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/score.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/score.py new file mode 100644 index 0000000000..8e90b82171 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/score.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +Score = typing.Union[str, int] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/simple_evaluation_output.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/simple_evaluation_output.py new file mode 100644 index 0000000000..ae5997391c --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/simple_evaluation_output.py @@ -0,0 +1,26 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .evaluation_type import EvaluationType +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class SimpleEvaluationOutput(UniversalBaseModel): + id: str + variant_ids: typing.List[str] + app_id: str + status: str + evaluation_type: EvaluationType + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/span.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/span.py new file mode 100644 index 0000000000..5cc2557b24 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/span.py @@ -0,0 +1,42 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +from ..core.pydantic_utilities import UniversalBaseModel +import typing +import datetime as dt +from .span_variant import SpanVariant +from .span_status_code import SpanStatusCode +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class Span(UniversalBaseModel): + id: str + name: str + project_id: typing.Optional[str] = None + parent_span_id: typing.Optional[str] = None + created_at: dt.datetime + variant: SpanVariant + environment: typing.Optional[str] = None + spankind: str + status: SpanStatusCode + metadata: typing.Dict[str, typing.Optional[typing.Any]] + trace_id: str + user_id: typing.Optional[str] = None + content: typing.Dict[str, typing.Optional[typing.Any]] + children: typing.Optional[typing.List["Span"]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: 
+ + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(Span) diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/span_detail.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/span_detail.py new file mode 100644 index 0000000000..44c9660657 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/span_detail.py @@ -0,0 +1,44 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +from ..core.pydantic_utilities import UniversalBaseModel +from .span import Span +import typing +import datetime as dt +from .span_variant import SpanVariant +from .span_status_code import SpanStatusCode +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class SpanDetail(UniversalBaseModel): + id: str + name: str + project_id: typing.Optional[str] = None + parent_span_id: typing.Optional[str] = None + created_at: dt.datetime + variant: SpanVariant + environment: typing.Optional[str] = None + spankind: str + status: SpanStatusCode + metadata: typing.Dict[str, typing.Optional[typing.Any]] + trace_id: str + user_id: typing.Optional[str] = None + content: typing.Dict[str, typing.Optional[typing.Any]] + children: typing.Optional[typing.List[Span]] = None + config: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(Span, SpanDetail=SpanDetail) diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/span_dto.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/span_dto.py new file mode 100644 index 0000000000..80fdb2b0a3 --- 
/dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/span_dto.py @@ -0,0 +1,54 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .lifecycle_dto import LifecycleDto +from .root_dto import RootDto +from .tree_dto import TreeDto +from .node_dto import NodeDto +from .parent_dto import ParentDto +from .time_dto import TimeDto +from .status_dto import StatusDto +from .exception_dto import ExceptionDto +from .link_dto import LinkDto +from .o_tel_extra_dto import OTelExtraDto +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class SpanDto(UniversalBaseModel): + lifecycle: typing.Optional[LifecycleDto] = None + root: RootDto + tree: TreeDto + node: NodeDto + parent: typing.Optional[ParentDto] = None + time: TimeDto + status: StatusDto + exception: typing.Optional[ExceptionDto] = None + data: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + metrics: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + meta: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + refs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + links: typing.Optional[typing.List[LinkDto]] = None + otel: typing.Optional[OTelExtraDto] = None + nodes: typing.Optional[ + typing.Dict[str, typing.Optional["SpanDtoNodesValue"]] + ] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .span_dto_nodes_value import SpanDtoNodesValue # noqa: E402 + +update_forward_refs(SpanDto) diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/span_dto_nodes_value.py 
b/agenta-cli/debugging/simple-app/agenta/client/backend/types/span_dto_nodes_value.py new file mode 100644 index 0000000000..93e28b70de --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/span_dto_nodes_value.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +import typing +import typing + +if typing.TYPE_CHECKING: + from .span_dto import SpanDto +SpanDtoNodesValue = typing.Union["SpanDto", typing.List["SpanDto"]] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/span_status_code.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/span_status_code.py new file mode 100644 index 0000000000..cb5a002953 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/span_status_code.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +SpanStatusCode = typing.Union[typing.Literal["UNSET", "OK", "ERROR"], typing.Any] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/span_variant.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/span_variant.py new file mode 100644 index 0000000000..4471b2229c --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/span_variant.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class SpanVariant(UniversalBaseModel): + variant_id: typing.Optional[str] = None + variant_name: typing.Optional[str] = None + revision: typing.Optional[int] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/status_code.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/status_code.py new file mode 100644 index 0000000000..ab7c307ab7 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/status_code.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +StatusCode = typing.Union[typing.Literal["UNSET", "OK", "ERROR"], typing.Any] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/status_dto.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/status_dto.py new file mode 100644 index 0000000000..44f2ef907b --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/status_dto.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .status_code import StatusCode +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class StatusDto(UniversalBaseModel): + code: StatusCode + message: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/template.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/template.py new file mode 100644 index 0000000000..af02645289 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/template.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .template_image_info import TemplateImageInfo +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class Template(UniversalBaseModel): + id: str + image: TemplateImageInfo + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/template_image_info.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/template_image_info.py new file mode 100644 index 0000000000..b179682fda --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/template_image_info.py @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +import datetime as dt +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class TemplateImageInfo(UniversalBaseModel): + name: str + size: typing.Optional[int] = None + digest: typing.Optional[str] = None + title: str + description: str + last_pushed: typing.Optional[dt.datetime] = None + repo_name: typing.Optional[str] = None + template_uri: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/test_set_output_response.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/test_set_output_response.py new file mode 100644 index 0000000000..34023d5c7a --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/test_set_output_response.py @@ -0,0 +1,26 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing_extensions +from ..core.serialization import FieldMetadata +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class TestSetOutputResponse(UniversalBaseModel): + id: typing_extensions.Annotated[str, FieldMetadata(alias="_id")] + name: str + created_at: str + updated_at: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/test_set_simple_response.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/test_set_simple_response.py new file mode 100644 index 0000000000..659d429caa --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/test_set_simple_response.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class TestSetSimpleResponse(UniversalBaseModel): + id: str + name: str + created_at: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/time_dto.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/time_dto.py new file mode 100644 index 0000000000..5def8ab023 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/time_dto.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import datetime as dt +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class TimeDto(UniversalBaseModel): + start: dt.datetime + end: dt.datetime + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/trace_detail.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/trace_detail.py new file mode 100644 index 0000000000..22ba262a83 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/trace_detail.py @@ -0,0 +1,44 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +from ..core.pydantic_utilities import UniversalBaseModel +from .span import Span +import typing +import datetime as dt +from .span_variant import SpanVariant +from .span_status_code import SpanStatusCode +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class TraceDetail(UniversalBaseModel): + id: str + name: str + project_id: typing.Optional[str] = None + parent_span_id: typing.Optional[str] = None + created_at: dt.datetime + variant: SpanVariant + environment: typing.Optional[str] = None + spankind: str + status: SpanStatusCode + metadata: typing.Dict[str, typing.Optional[typing.Any]] + trace_id: str + user_id: typing.Optional[str] = None + content: typing.Dict[str, typing.Optional[typing.Any]] + children: typing.Optional[typing.List[Span]] = None + config: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # 
type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(Span, TraceDetail=TraceDetail) diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/tree_dto.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/tree_dto.py new file mode 100644 index 0000000000..dfb98faaac --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/tree_dto.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .tree_type import TreeType +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class TreeDto(UniversalBaseModel): + id: str + type: typing.Optional[TreeType] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/tree_type.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/tree_type.py new file mode 100644 index 0000000000..3be7057bec --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/tree_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +TreeType = typing.Literal["invocation"] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/update_app_output.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/update_app_output.py new file mode 100644 index 0000000000..deede4ef37 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/update_app_output.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class UpdateAppOutput(UniversalBaseModel): + app_id: str + app_name: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/uri.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/uri.py new file mode 100644 index 0000000000..7c9a2fab47 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/uri.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class Uri(UniversalBaseModel): + uri: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/validation_error.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/validation_error.py new file mode 100644 index 0000000000..4b6d592bda --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/validation_error.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .validation_error_loc_item import ValidationErrorLocItem +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ValidationError(UniversalBaseModel): + loc: typing.List[ValidationErrorLocItem] + msg: str + type: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/validation_error_loc_item.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/validation_error_loc_item.py new file mode 100644 index 0000000000..9a0a83fef5 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/validation_error_loc_item.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ValidationErrorLocItem = typing.Union[str, int] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/variant_action.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/variant_action.py new file mode 100644 index 0000000000..0dec29eea2 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/variant_action.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .variant_action_enum import VariantActionEnum +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class VariantAction(UniversalBaseModel): + action: VariantActionEnum + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/variant_action_enum.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/variant_action_enum.py new file mode 100644 index 0000000000..1bc746b0fd --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/variant_action_enum.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +VariantActionEnum = typing.Union[typing.Literal["START", "STOP"], typing.Any] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/with_pagination.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/with_pagination.py new file mode 100644 index 0000000000..cb02dbd92b --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/with_pagination.py @@ -0,0 +1,26 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +import typing_extensions +from ..core.serialization import FieldMetadata +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class WithPagination(UniversalBaseModel): + data: typing.List[typing.Optional[typing.Any]] + total: int + page: int + page_size: typing_extensions.Annotated[int, FieldMetadata(alias="pageSize")] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/workspace_member_response.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/workspace_member_response.py new file mode 100644 index 0000000000..60f29c7110 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/workspace_member_response.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .workspace_permission import WorkspacePermission +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class WorkspaceMemberResponse(UniversalBaseModel): + user: typing.Dict[str, typing.Optional[typing.Any]] + roles: typing.List[WorkspacePermission] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/workspace_permission.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/workspace_permission.py new file mode 100644 index 0000000000..028e712cd6 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/workspace_permission.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .workspace_role import WorkspaceRole +import typing +from .permission import Permission +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class WorkspacePermission(UniversalBaseModel): + role_name: WorkspaceRole + role_description: typing.Optional[str] = None + permissions: typing.Optional[typing.List[Permission]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/workspace_response.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/workspace_response.py new file mode 100644 index 0000000000..a042d463e8 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/workspace_response.py @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .workspace_member_response import WorkspaceMemberResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class WorkspaceResponse(UniversalBaseModel): + created_at: typing.Optional[str] = None + updated_at: typing.Optional[str] = None + id: str + name: str + description: typing.Optional[str] = None + type: typing.Optional[str] = None + organization: str + members: typing.Optional[typing.List[WorkspaceMemberResponse]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/workspace_role.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/workspace_role.py new file mode 100644 index 0000000000..065ea0abf3 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/workspace_role.py @@ -0,0 +1,15 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +WorkspaceRole = typing.Union[ + typing.Literal[ + "owner", + "viewer", + "editor", + "evaluator", + "workspace_admin", + "deployment_manager", + ], + typing.Any, +] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/types/workspace_role_response.py b/agenta-cli/debugging/simple-app/agenta/client/backend/types/workspace_role_response.py new file mode 100644 index 0000000000..f210591fcb --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/types/workspace_role_response.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .workspace_role import WorkspaceRole +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class WorkspaceRoleResponse(UniversalBaseModel): + role_name: WorkspaceRole + role_description: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/variants/__init__.py b/agenta-cli/debugging/simple-app/agenta/client/backend/variants/__init__.py new file mode 100644 index 0000000000..71317185ab --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/variants/__init__.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +from .types import AddVariantFromBaseAndConfigResponse + +__all__ = ["AddVariantFromBaseAndConfigResponse"] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/variants/client.py b/agenta-cli/debugging/simple-app/agenta/client/backend/variants/client.py new file mode 100644 index 0000000000..389a77f778 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/variants/client.py @@ -0,0 +1,2748 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +from ..core.client_wrapper import SyncClientWrapper +from ..core.request_options import RequestOptions +from .types.add_variant_from_base_and_config_response import ( + AddVariantFromBaseAndConfigResponse, +) +from ..core.pydantic_utilities import parse_obj_as +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..types.app_variant_response import AppVariantResponse +from ..core.jsonable_encoder import jsonable_encoder +from ..types.variant_action import VariantAction +from ..types.docker_env_vars import DockerEnvVars +from ..types.uri import Uri +from ..core.serialization import convert_and_respect_annotation_metadata +from ..types.app_variant_revision import AppVariantRevision +from ..types.reference_request_model import ReferenceRequestModel +from ..types.config_response_model import ConfigResponseModel +from ..types.config_dto import ConfigDto +from ..types.reference_dto import ReferenceDto +from ..core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class VariantsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def add_variant_from_base_and_config( + self, + *, + base_id: str, + new_variant_name: str, + new_config_name: str, + parameters: typing.Dict[str, typing.Optional[typing.Any]], + request_options: typing.Optional[RequestOptions] = None, + ) -> AddVariantFromBaseAndConfigResponse: + """ + Add a new variant based on an existing one. + Same as POST /config + + Args: + payload (AddVariantFromBasePayload): Payload containing base variant ID, new variant name, and parameters. + stoken_session (SessionContainer, optional): Session container. Defaults to result of verify_session(). 
+ + Raises: + HTTPException: Raised if the variant could not be added or accessed. + + Returns: + Union[AppVariantResponse, Any]: New variant details or exception. + + Parameters + ---------- + base_id : str + + new_variant_name : str + + new_config_name : str + + parameters : typing.Dict[str, typing.Optional[typing.Any]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AddVariantFromBaseAndConfigResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.add_variant_from_base_and_config( + base_id="base_id", + new_variant_name="new_variant_name", + new_config_name="new_config_name", + parameters={"key": "value"}, + ) + """ + _response = self._client_wrapper.httpx_client.request( + "variants/from-base", + method="POST", + json={ + "base_id": base_id, + "new_variant_name": new_variant_name, + "new_config_name": new_config_name, + "parameters": parameters, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AddVariantFromBaseAndConfigResponse, + parse_obj_as( + type_=AddVariantFromBaseAndConfigResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_variant( + self, + variant_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> AppVariantResponse: + """ + Parameters + ---------- + variant_id : str + + request_options : 
typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AppVariantResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.get_variant( + variant_id="variant_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AppVariantResponse, + parse_obj_as( + type_=AppVariantResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def start_variant( + self, + variant_id: str, + *, + action: VariantAction, + env_vars: typing.Optional[DockerEnvVars] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> Uri: + """ + Start a variant of an app. + + Args: + variant_id (str): The ID of the variant to start. + action (VariantAction): The action to perform on the variant (start). + env_vars (Optional[DockerEnvVars], optional): The environment variables to inject to the Docker container. Defaults to None. + stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()). + + Returns: + URI: The URL of the started variant. + + Raises: + HTTPException: If the app container cannot be started. 
+ + Parameters + ---------- + variant_id : str + + action : VariantAction + + env_vars : typing.Optional[DockerEnvVars] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + Uri + Successful Response + + Examples + -------- + from agenta import AgentaApi, VariantAction + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.start_variant( + variant_id="variant_id", + action=VariantAction( + action="START", + ), + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}", + method="PUT", + json={ + "action": convert_and_respect_annotation_metadata( + object_=action, annotation=VariantAction, direction="write" + ), + "env_vars": convert_and_respect_annotation_metadata( + object_=env_vars, annotation=DockerEnvVars, direction="write" + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + Uri, + parse_obj_as( + type_=Uri, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def remove_variant( + self, + variant_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Remove a variant from the server. + In the case it's the last variant using the image, stop the container and remove the image. 
+ + Arguments: + app_variant -- AppVariant to remove + + Raises: + HTTPException: If there is a problem removing the app variant + + Parameters + ---------- + variant_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.remove_variant( + variant_id="variant_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_variant_parameters( + self, + variant_id: str, + *, + parameters: typing.Dict[str, typing.Optional[typing.Any]], + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Updates the parameters for an app variant. + + Args: + variant_id (str): The ID of the app variant to update. + payload (UpdateVariantParameterPayload): The payload containing the updated parameters. + stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()). + + Raises: + HTTPException: If there is an error while trying to update the app variant. 
+ + Returns: + JSONResponse: A JSON response containing the updated app variant parameters. + + Parameters + ---------- + variant_id : str + + parameters : typing.Dict[str, typing.Optional[typing.Any]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.update_variant_parameters( + variant_id="variant_id", + parameters={"key": "value"}, + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}/parameters", + method="PUT", + json={ + "parameters": parameters, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_variant_image( + self, + variant_id: str, + *, + docker_id: str, + tags: str, + type: typing.Optional[str] = OMIT, + organization_id: typing.Optional[str] = OMIT, + workspace_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Updates the image used in an app variant. + + Args: + variant_id (str): The ID of the app variant to update. + image (Image): The image information to update. 
+ + Raises: + HTTPException: If an error occurs while trying to update the app variant. + + Returns: + JSONResponse: A JSON response indicating whether the update was successful or not. + + Parameters + ---------- + variant_id : str + + docker_id : str + + tags : str + + type : typing.Optional[str] + + organization_id : typing.Optional[str] + + workspace_id : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.update_variant_image( + variant_id="variant_id", + docker_id="docker_id", + tags="tags", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}/image", + method="PUT", + json={ + "type": type, + "docker_id": docker_id, + "tags": tags, + "organization_id": organization_id, + "workspace_id": workspace_id, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def retrieve_variant_logs( + self, + variant_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + variant_id : str + + request_options : 
typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.retrieve_variant_logs( + variant_id="variant_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}/logs", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_variant_revisions( + self, + variant_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[AppVariantRevision]: + """ + Parameters + ---------- + variant_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[AppVariantRevision] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.get_variant_revisions( + variant_id="variant_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}/revisions", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[AppVariantRevision], + parse_obj_as( + type_=typing.List[AppVariantRevision], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_variant_revision( + self, + variant_id: str, + revision_number: int, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> AppVariantRevision: + """ + Parameters + ---------- + variant_id : str + + revision_number : int + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AppVariantRevision + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.get_variant_revision( + variant_id="variant_id", + revision_number=1, + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}/revisions/{jsonable_encoder(revision_number)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AppVariantRevision, + parse_obj_as( + type_=AppVariantRevision, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def configs_add( + self, + *, + variant_ref: ReferenceRequestModel, + application_ref: ReferenceRequestModel, + request_options: typing.Optional[RequestOptions] = None, + ) -> ConfigResponseModel: + """ + Parameters + ---------- + variant_ref : ReferenceRequestModel + + application_ref : ReferenceRequestModel + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + ConfigResponseModel + Successful Response + + Examples + -------- + from agenta import AgentaApi, ReferenceRequestModel + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.configs_add( + variant_ref=ReferenceRequestModel(), + application_ref=ReferenceRequestModel(), + ) + """ + _response = self._client_wrapper.httpx_client.request( + "variants/configs/add", + method="POST", + json={ + "variant_ref": convert_and_respect_annotation_metadata( + object_=variant_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ConfigResponseModel, + parse_obj_as( + type_=ConfigResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def configs_fetch( + self, + *, + variant_ref: typing.Optional[ReferenceRequestModel] = OMIT, + environment_ref: typing.Optional[ReferenceRequestModel] = OMIT, + application_ref: typing.Optional[ReferenceRequestModel] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ConfigResponseModel: + """ + Parameters + ---------- + variant_ref : typing.Optional[ReferenceRequestModel] + + environment_ref : typing.Optional[ReferenceRequestModel] + + application_ref : typing.Optional[ReferenceRequestModel] + + request_options : 
typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ConfigResponseModel + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.configs_fetch() + """ + _response = self._client_wrapper.httpx_client.request( + "variants/configs/fetch", + method="POST", + json={ + "variant_ref": convert_and_respect_annotation_metadata( + object_=variant_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "environment_ref": convert_and_respect_annotation_metadata( + object_=environment_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ConfigResponseModel, + parse_obj_as( + type_=ConfigResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def configs_fork( + self, + *, + variant_ref: typing.Optional[ReferenceRequestModel] = OMIT, + environment_ref: typing.Optional[ReferenceRequestModel] = OMIT, + application_ref: typing.Optional[ReferenceRequestModel] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ConfigResponseModel: + """ + Parameters + ---------- + variant_ref : typing.Optional[ReferenceRequestModel] + + environment_ref : 
typing.Optional[ReferenceRequestModel] + + application_ref : typing.Optional[ReferenceRequestModel] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ConfigResponseModel + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.configs_fork() + """ + _response = self._client_wrapper.httpx_client.request( + "variants/configs/fork", + method="POST", + json={ + "variant_ref": convert_and_respect_annotation_metadata( + object_=variant_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "environment_ref": convert_and_respect_annotation_metadata( + object_=environment_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ConfigResponseModel, + parse_obj_as( + type_=ConfigResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def configs_commit( + self, + *, + config: ConfigDto, + request_options: typing.Optional[RequestOptions] = None, + ) -> ConfigResponseModel: + """ + Parameters + ---------- + config : ConfigDto + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + ConfigResponseModel + Successful Response + + Examples + -------- + from agenta import AgentaApi, ConfigDto + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.configs_commit( + config=ConfigDto( + params={"key": "value"}, + ), + ) + """ + _response = self._client_wrapper.httpx_client.request( + "variants/configs/commit", + method="POST", + json={ + "config": convert_and_respect_annotation_metadata( + object_=config, annotation=ConfigDto, direction="write" + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ConfigResponseModel, + parse_obj_as( + type_=ConfigResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def configs_deploy( + self, + *, + variant_ref: ReferenceRequestModel, + environment_ref: ReferenceRequestModel, + application_ref: typing.Optional[ReferenceRequestModel] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ConfigResponseModel: + """ + Parameters + ---------- + variant_ref : ReferenceRequestModel + + environment_ref : ReferenceRequestModel + + application_ref : typing.Optional[ReferenceRequestModel] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + ConfigResponseModel + Successful Response + + Examples + -------- + from agenta import AgentaApi, ReferenceRequestModel + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.configs_deploy( + variant_ref=ReferenceRequestModel(), + environment_ref=ReferenceRequestModel(), + ) + """ + _response = self._client_wrapper.httpx_client.request( + "variants/configs/deploy", + method="POST", + json={ + "variant_ref": convert_and_respect_annotation_metadata( + object_=variant_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "environment_ref": convert_and_respect_annotation_metadata( + object_=environment_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ConfigResponseModel, + parse_obj_as( + type_=ConfigResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def configs_delete( + self, + *, + variant_ref: ReferenceRequestModel, + application_ref: typing.Optional[ReferenceRequestModel] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> int: + """ + Parameters + ---------- + variant_ref : ReferenceRequestModel + + application_ref : typing.Optional[ReferenceRequestModel] + + request_options : typing.Optional[RequestOptions] + 
Request-specific configuration. + + Returns + ------- + int + Successful Response + + Examples + -------- + from agenta import AgentaApi, ReferenceRequestModel + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.configs_delete( + variant_ref=ReferenceRequestModel(), + ) + """ + _response = self._client_wrapper.httpx_client.request( + "variants/configs/delete", + method="POST", + json={ + "variant_ref": convert_and_respect_annotation_metadata( + object_=variant_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + int, + parse_obj_as( + type_=int, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def configs_list( + self, + *, + application_ref: ReferenceDto, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[ConfigResponseModel]: + """ + Parameters + ---------- + application_ref : ReferenceDto + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[ConfigResponseModel] + Successful Response + + Examples + -------- + from agenta import AgentaApi, ReferenceDto + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.configs_list( + application_ref=ReferenceDto(), + ) + """ + _response = self._client_wrapper.httpx_client.request( + "variants/configs/list", + method="POST", + json={ + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, annotation=ReferenceDto, direction="write" + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[ConfigResponseModel], + parse_obj_as( + type_=typing.List[ConfigResponseModel], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def configs_history( + self, + *, + variant_ref: ReferenceRequestModel, + application_ref: typing.Optional[ReferenceRequestModel] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[ConfigResponseModel]: + """ + Parameters + ---------- + variant_ref : ReferenceRequestModel + + application_ref : typing.Optional[ReferenceRequestModel] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[ConfigResponseModel] + Successful Response + + Examples + -------- + from agenta import AgentaApi, ReferenceRequestModel + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.configs_history( + variant_ref=ReferenceRequestModel(), + ) + """ + _response = self._client_wrapper.httpx_client.request( + "variants/configs/history", + method="POST", + json={ + "variant_ref": convert_and_respect_annotation_metadata( + object_=variant_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[ConfigResponseModel], + parse_obj_as( + type_=typing.List[ConfigResponseModel], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncVariantsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def add_variant_from_base_and_config( + self, + *, + base_id: str, + new_variant_name: str, + new_config_name: str, + parameters: typing.Dict[str, typing.Optional[typing.Any]], + request_options: typing.Optional[RequestOptions] = None, + ) -> AddVariantFromBaseAndConfigResponse: + """ + Add a new variant based on an existing one. 
+ Same as POST /config + + Args: + payload (AddVariantFromBasePayload): Payload containing base variant ID, new variant name, and parameters. + stoken_session (SessionContainer, optional): Session container. Defaults to result of verify_session(). + + Raises: + HTTPException: Raised if the variant could not be added or accessed. + + Returns: + Union[AppVariantResponse, Any]: New variant details or exception. + + Parameters + ---------- + base_id : str + + new_variant_name : str + + new_config_name : str + + parameters : typing.Dict[str, typing.Optional[typing.Any]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AddVariantFromBaseAndConfigResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.add_variant_from_base_and_config( + base_id="base_id", + new_variant_name="new_variant_name", + new_config_name="new_config_name", + parameters={"key": "value"}, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "variants/from-base", + method="POST", + json={ + "base_id": base_id, + "new_variant_name": new_variant_name, + "new_config_name": new_config_name, + "parameters": parameters, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AddVariantFromBaseAndConfigResponse, + parse_obj_as( + type_=AddVariantFromBaseAndConfigResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise 
ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_variant( + self, + variant_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> AppVariantResponse: + """ + Parameters + ---------- + variant_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AppVariantResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.get_variant( + variant_id="variant_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AppVariantResponse, + parse_obj_as( + type_=AppVariantResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def start_variant( + self, + variant_id: str, + *, + action: VariantAction, + env_vars: typing.Optional[DockerEnvVars] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> Uri: + """ + Start a variant of an app. + + Args: + variant_id (str): The ID of the variant to start. + action (VariantAction): The action to perform on the variant (start). 
+ env_vars (Optional[DockerEnvVars], optional): The environment variables to inject to the Docker container. Defaults to None. + stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()). + + Returns: + URI: The URL of the started variant. + + Raises: + HTTPException: If the app container cannot be started. + + Parameters + ---------- + variant_id : str + + action : VariantAction + + env_vars : typing.Optional[DockerEnvVars] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + Uri + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi, VariantAction + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.start_variant( + variant_id="variant_id", + action=VariantAction( + action="START", + ), + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}", + method="PUT", + json={ + "action": convert_and_respect_annotation_metadata( + object_=action, annotation=VariantAction, direction="write" + ), + "env_vars": convert_and_respect_annotation_metadata( + object_=env_vars, annotation=DockerEnvVars, direction="write" + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + Uri, + parse_obj_as( + type_=Uri, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, 
body=_response_json) + + async def remove_variant( + self, + variant_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Remove a variant from the server. + In the case it's the last variant using the image, stop the container and remove the image. + + Arguments: + app_variant -- AppVariant to remove + + Raises: + HTTPException: If there is a problem removing the app variant + + Parameters + ---------- + variant_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.remove_variant( + variant_id="variant_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_variant_parameters( + self, + variant_id: str, + *, + parameters: typing.Dict[str, typing.Optional[typing.Any]], + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + 
Updates the parameters for an app variant. + + Args: + variant_id (str): The ID of the app variant to update. + payload (UpdateVariantParameterPayload): The payload containing the updated parameters. + stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()). + + Raises: + HTTPException: If there is an error while trying to update the app variant. + + Returns: + JSONResponse: A JSON response containing the updated app variant parameters. + + Parameters + ---------- + variant_id : str + + parameters : typing.Dict[str, typing.Optional[typing.Any]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.update_variant_parameters( + variant_id="variant_id", + parameters={"key": "value"}, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}/parameters", + method="PUT", + json={ + "parameters": parameters, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def 
update_variant_image( + self, + variant_id: str, + *, + docker_id: str, + tags: str, + type: typing.Optional[str] = OMIT, + organization_id: typing.Optional[str] = OMIT, + workspace_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Updates the image used in an app variant. + + Args: + variant_id (str): The ID of the app variant to update. + image (Image): The image information to update. + + Raises: + HTTPException: If an error occurs while trying to update the app variant. + + Returns: + JSONResponse: A JSON response indicating whether the update was successful or not. + + Parameters + ---------- + variant_id : str + + docker_id : str + + tags : str + + type : typing.Optional[str] + + organization_id : typing.Optional[str] + + workspace_id : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.update_variant_image( + variant_id="variant_id", + docker_id="docker_id", + tags="tags", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}/image", + method="PUT", + json={ + "type": type, + "docker_id": docker_id, + "tags": tags, + "organization_id": organization_id, + "workspace_id": workspace_id, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + 
typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def retrieve_variant_logs( + self, + variant_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + variant_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.retrieve_variant_logs( + variant_id="variant_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}/logs", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_variant_revisions( + self, + variant_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> 
typing.List[AppVariantRevision]: + """ + Parameters + ---------- + variant_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[AppVariantRevision] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.get_variant_revisions( + variant_id="variant_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}/revisions", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[AppVariantRevision], + parse_obj_as( + type_=typing.List[AppVariantRevision], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_variant_revision( + self, + variant_id: str, + revision_number: int, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> AppVariantRevision: + """ + Parameters + ---------- + variant_id : str + + revision_number : int + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AppVariantRevision + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.get_variant_revision( + variant_id="variant_id", + revision_number=1, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}/revisions/{jsonable_encoder(revision_number)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AppVariantRevision, + parse_obj_as( + type_=AppVariantRevision, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def configs_add( + self, + *, + variant_ref: ReferenceRequestModel, + application_ref: ReferenceRequestModel, + request_options: typing.Optional[RequestOptions] = None, + ) -> ConfigResponseModel: + """ + Parameters + ---------- + variant_ref : ReferenceRequestModel + + application_ref : ReferenceRequestModel + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + ConfigResponseModel + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi, ReferenceRequestModel + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.configs_add( + variant_ref=ReferenceRequestModel(), + application_ref=ReferenceRequestModel(), + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "variants/configs/add", + method="POST", + json={ + "variant_ref": convert_and_respect_annotation_metadata( + object_=variant_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ConfigResponseModel, + parse_obj_as( + type_=ConfigResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def configs_fetch( + self, + *, + variant_ref: typing.Optional[ReferenceRequestModel] = OMIT, + environment_ref: typing.Optional[ReferenceRequestModel] = OMIT, + application_ref: typing.Optional[ReferenceRequestModel] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ConfigResponseModel: + """ + Parameters + ---------- + variant_ref : typing.Optional[ReferenceRequestModel] + + environment_ref : 
typing.Optional[ReferenceRequestModel] + + application_ref : typing.Optional[ReferenceRequestModel] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ConfigResponseModel + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.configs_fetch() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "variants/configs/fetch", + method="POST", + json={ + "variant_ref": convert_and_respect_annotation_metadata( + object_=variant_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "environment_ref": convert_and_respect_annotation_metadata( + object_=environment_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ConfigResponseModel, + parse_obj_as( + type_=ConfigResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def configs_fork( + self, + *, + variant_ref: typing.Optional[ReferenceRequestModel] = OMIT, + environment_ref: typing.Optional[ReferenceRequestModel] = OMIT, + application_ref: typing.Optional[ReferenceRequestModel] = 
OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ConfigResponseModel: + """ + Parameters + ---------- + variant_ref : typing.Optional[ReferenceRequestModel] + + environment_ref : typing.Optional[ReferenceRequestModel] + + application_ref : typing.Optional[ReferenceRequestModel] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ConfigResponseModel + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.configs_fork() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "variants/configs/fork", + method="POST", + json={ + "variant_ref": convert_and_respect_annotation_metadata( + object_=variant_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "environment_ref": convert_and_respect_annotation_metadata( + object_=environment_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ConfigResponseModel, + parse_obj_as( + type_=ConfigResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def configs_commit( + 
self, + *, + config: ConfigDto, + request_options: typing.Optional[RequestOptions] = None, + ) -> ConfigResponseModel: + """ + Parameters + ---------- + config : ConfigDto + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ConfigResponseModel + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi, ConfigDto + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.configs_commit( + config=ConfigDto( + params={"key": "value"}, + ), + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "variants/configs/commit", + method="POST", + json={ + "config": convert_and_respect_annotation_metadata( + object_=config, annotation=ConfigDto, direction="write" + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ConfigResponseModel, + parse_obj_as( + type_=ConfigResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def configs_deploy( + self, + *, + variant_ref: ReferenceRequestModel, + environment_ref: ReferenceRequestModel, + application_ref: typing.Optional[ReferenceRequestModel] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ConfigResponseModel: + """ + Parameters + ---------- + variant_ref : ReferenceRequestModel + + environment_ref : ReferenceRequestModel + + application_ref : 
typing.Optional[ReferenceRequestModel] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ConfigResponseModel + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi, ReferenceRequestModel + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.configs_deploy( + variant_ref=ReferenceRequestModel(), + environment_ref=ReferenceRequestModel(), + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "variants/configs/deploy", + method="POST", + json={ + "variant_ref": convert_and_respect_annotation_metadata( + object_=variant_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "environment_ref": convert_and_respect_annotation_metadata( + object_=environment_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ConfigResponseModel, + parse_obj_as( + type_=ConfigResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def configs_delete( + self, + *, + variant_ref: ReferenceRequestModel, + application_ref: typing.Optional[ReferenceRequestModel] = OMIT, + request_options: 
typing.Optional[RequestOptions] = None, + ) -> int: + """ + Parameters + ---------- + variant_ref : ReferenceRequestModel + + application_ref : typing.Optional[ReferenceRequestModel] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + int + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi, ReferenceRequestModel + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.configs_delete( + variant_ref=ReferenceRequestModel(), + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "variants/configs/delete", + method="POST", + json={ + "variant_ref": convert_and_respect_annotation_metadata( + object_=variant_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + int, + parse_obj_as( + type_=int, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def configs_list( + self, + *, + application_ref: ReferenceDto, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[ConfigResponseModel]: + """ + Parameters + ---------- + application_ref : ReferenceDto + + request_options : 
typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[ConfigResponseModel] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi, ReferenceDto + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.configs_list( + application_ref=ReferenceDto(), + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "variants/configs/list", + method="POST", + json={ + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, annotation=ReferenceDto, direction="write" + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[ConfigResponseModel], + parse_obj_as( + type_=typing.List[ConfigResponseModel], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def configs_history( + self, + *, + variant_ref: ReferenceRequestModel, + application_ref: typing.Optional[ReferenceRequestModel] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[ConfigResponseModel]: + """ + Parameters + ---------- + variant_ref : ReferenceRequestModel + + application_ref : typing.Optional[ReferenceRequestModel] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[ConfigResponseModel] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi, ReferenceRequestModel + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.configs_history( + variant_ref=ReferenceRequestModel(), + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "variants/configs/history", + method="POST", + json={ + "variant_ref": convert_and_respect_annotation_metadata( + object_=variant_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[ConfigResponseModel], + parse_obj_as( + type_=typing.List[ConfigResponseModel], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/variants/types/__init__.py b/agenta-cli/debugging/simple-app/agenta/client/backend/variants/types/__init__.py new file mode 100644 index 0000000000..fac6b42753 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/variants/types/__init__.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .add_variant_from_base_and_config_response import ( + AddVariantFromBaseAndConfigResponse, +) + +__all__ = ["AddVariantFromBaseAndConfigResponse"] diff --git a/agenta-cli/debugging/simple-app/agenta/client/backend/variants/types/add_variant_from_base_and_config_response.py b/agenta-cli/debugging/simple-app/agenta/client/backend/variants/types/add_variant_from_base_and_config_response.py new file mode 100644 index 0000000000..0b9252e08b --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/backend/variants/types/add_variant_from_base_and_config_response.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...types.app_variant_response import AppVariantResponse + +AddVariantFromBaseAndConfigResponse = typing.Union[ + AppVariantResponse, typing.Optional[typing.Any] +] diff --git a/agenta-cli/debugging/simple-app/agenta/client/client.py b/agenta-cli/debugging/simple-app/agenta/client/client.py new file mode 100644 index 0000000000..d5e4547f74 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/client.py @@ -0,0 +1,563 @@ +from typing import Dict, Any, Optional +import os +import time +import click +from pathlib import Path +from typing import List, Optional, Dict, Any + +import requests +from agenta.client.api_models import AppVariant, Image, VariantConfigPayload +from docker.models.images import Image as DockerImage +from requests.exceptions import RequestException + +BACKEND_URL_SUFFIX = os.environ.get("BACKEND_URL_SUFFIX", "api") + + +class APIRequestError(Exception): + """Exception to be raised when an API request fails.""" + + +def get_base_by_app_id_and_name( + app_id: str, base_name: str, host: str, api_key: str = None +) -> str: + """ + Get the base ID for a given app ID and base name. + + Args: + app_id (str): The ID of the app. + base_name (str): The name of the base. + host (str): The URL of the server. + api_key (str, optional): The API key to use for authentication. 
Defaults to None. + + Returns: + str: The ID of the base. + + Raises: + APIRequestError: If the request to get the base fails or the base does not exist on the server. + """ + response = requests.get( + f"{host}/{BACKEND_URL_SUFFIX}/bases/?app_id={app_id}&base_name={base_name}", + headers={"Authorization": api_key} if api_key is not None else None, + timeout=600, + ) + if response.status_code != 200: + error_message = response.json() + raise APIRequestError( + f"Request to get base failed with status code {response.status_code} and error message: {error_message}." + ) + if len(response.json()) == 0: + raise APIRequestError( + f"Base with name {base_name} does not exist on the server." + ) + else: + return response.json()[0]["base_id"] + + +def get_app_by_name(app_name: str, host: str, api_key: str = None) -> str: + """Get app by its name on the server. + + Args: + app_name (str): Name of the app + host (str): Hostname of the server + api_key (str): The API key to use for the request. + """ + + response = requests.get( + f"{host}/{BACKEND_URL_SUFFIX}/apps/?app_name={app_name}", + headers={"Authorization": api_key} if api_key is not None else None, + timeout=600, + ) + if response.status_code != 200: + error_message = response.json() + raise APIRequestError( + f"Request to get app failed with status code {response.status_code} and error message: {error_message}." + ) + if len(response.json()) == 0: + raise APIRequestError(f"App with name {app_name} does not exist on the server.") + else: + return response.json()[0]["app_id"] # only one app should exist for that name + + +def create_new_app(app_name: str, host: str, api_key: str = None) -> str: + """Creates new app on the server. + + Args: + app_name (str): Name of the app + host (str): Hostname of the server + api_key (str): The API key to use for the request. 
+ """ + + response = requests.post( + f"{host}/{BACKEND_URL_SUFFIX}/apps/", + json={"app_name": app_name}, + headers={"Authorization": api_key} if api_key is not None else None, + timeout=600, + ) + if response.status_code != 200: + error_message = response.json() + raise APIRequestError( + f"Request to create new app failed with status code {response.status_code} and error message: {error_message}." + ) + return response.json()["app_id"] + + +def add_variant_to_server( + app_id: str, + base_name: str, + image: Image, + host: str, + api_key: str = None, + retries=10, + backoff_factor=1, +) -> Dict: + """ + Adds a variant to the server with a retry mechanism and a single-line loading state. + + Args: + app_id (str): The ID of the app to add the variant to. + base_name (str): The base name for the variant. + image (Image): The image to use for the variant. + host (str): The host URL of the server. + api_key (str): The API key to use for the request. + retries (int): Number of times to retry the request. + backoff_factor (float): Factor to determine the delay between retries (exponential backoff). + + Returns: + dict: The JSON response from the server. + + Raises: + APIRequestError: If the request to the server fails after retrying. 
+ """ + variant_name = f"{base_name.lower()}.default" + payload = { + "variant_name": variant_name, + "base_name": base_name.lower(), + "config_name": "default", + "docker_id": image.docker_id, + "tags": image.tags, + } + + click.echo( + click.style("Waiting for the variant to be ready", fg="yellow"), nl=False + ) + + for attempt in range(retries): + try: + response = requests.post( + f"{host}/{BACKEND_URL_SUFFIX}/apps/{app_id}/variant/from-image/", + json=payload, + headers={"Authorization": api_key} if api_key is not None else None, + timeout=600, + ) + response.raise_for_status() + click.echo(click.style("\nVariant added successfully.", fg="green")) + return response.json() + except RequestException as e: + if attempt < retries - 1: + click.echo(click.style(".", fg="yellow"), nl=False) + time.sleep(backoff_factor * (2**attempt)) + else: + raise APIRequestError( + click.style( + f"\nRequest to app_variant endpoint failed with status code {response.status_code} and error message: {e}.", + fg="red", + ) + ) + except Exception as e: + raise APIRequestError( + click.style(f"\nAn unexpected error occurred: {e}", fg="red") + ) + + +def start_variant( + variant_id: str, + host: str, + env_vars: Optional[Dict[str, str]] = None, + api_key: str = None, +) -> str: + """ + Starts or stops a container with the given variant and exposes its endpoint. + + Args: + variant_id (str): The ID of the variant. + host (str): The host URL. + env_vars (Optional[Dict[str, str]]): Optional environment variables to inject into the container. + api_key (str): The API key to use for the request. + + Returns: + str: The endpoint of the container. + + Raises: + APIRequestError: If the API request fails. 
+ """ + payload = {} + payload["action"] = {"action": "START"} + if env_vars: + payload["env_vars"] = env_vars + try: + response = requests.put( + f"{host}/{BACKEND_URL_SUFFIX}/variants/{variant_id}/", + json=payload, + headers={"Authorization": api_key} if api_key is not None else None, + timeout=600, + ) + if response.status_code == 404: + raise APIRequestError( + f"404: Variant with ID {variant_id} does not exist on the server." + ) + elif response.status_code != 200: + error_message = response.text + raise APIRequestError( + f"Request to start variant endpoint failed with status code {response.status_code} and error message: {error_message}." + ) + return response.json().get("uri", "") + + except RequestException as e: + raise APIRequestError(f"An error occurred while making the request: {e}") + + +def list_variants(app_id: str, host: str, api_key: str = None) -> List[AppVariant]: + """ + Returns a list of AppVariant objects for a given app_id and host. + + Args: + app_id (str): The ID of the app to retrieve variants for. + host (str): The URL of the host to make the request to. + api_key (str): The API key to use for the request. + + Returns: + List[AppVariant]: A list of AppVariant objects for the given app_id and host. + """ + response = requests.get( + f"{host}/{BACKEND_URL_SUFFIX}/apps/{app_id}/variants/", + headers={"Authorization": api_key} if api_key is not None else None, + timeout=600, + ) + # Check for successful request + if response.status_code == 403: + raise APIRequestError( + f"No app by id {app_id} exists or you do not have access to it." + ) + elif response.status_code == 404: + raise APIRequestError( + f"No app by id {app_id} exists or you do not have access to it." + ) + elif response.status_code != 200: + error_message = response.json() + raise APIRequestError( + f"Request to apps endpoint failed with status code {response.status_code} and error message: {error_message}." 
+ ) + + app_variants = response.json() + return [AppVariant(**variant) for variant in app_variants] + + +def remove_variant(variant_id: str, host: str, api_key: str = None): + """ + Sends a DELETE request to the Agenta backend to remove a variant with the given ID. + + Args: + variant_id (str): The ID of the variant to be removed. + host (str): The URL of the Agenta backend. + api_key (str): The API key to use for the request. + + Raises: + APIRequestError: If the request to the remove_variant endpoint fails. + + Returns: + None + """ + response = requests.delete( + f"{host}/{BACKEND_URL_SUFFIX}/variants/{variant_id}", + headers={ + "Content-Type": "application/json", + "Authorization": api_key if api_key is not None else None, + }, + timeout=600, + ) + + # Check for successful request + if response.status_code != 200: + error_message = response.json() + raise APIRequestError( + f"Request to remove_variant endpoint failed with status code {response.status_code} and error message: {error_message}" + ) + + +def update_variant_image(variant_id: str, image: Image, host: str, api_key: str = None): + """ + Update the image of a variant with the given ID. + + Args: + variant_id (str): The ID of the variant to update. + image (Image): The new image to set for the variant. + host (str): The URL of the host to send the request to. + api_key (str): The API key to use for the request. + + Raises: + APIRequestError: If the request to update the variant fails. + + Returns: + None + """ + response = requests.put( + f"{host}/{BACKEND_URL_SUFFIX}/variants/{variant_id}/image/", + json=image.dict(), + headers={"Authorization": api_key} if api_key is not None else None, + timeout=600, + ) + if response.status_code != 200: + error_message = response.json() + raise APIRequestError( + f"Request to update app_variant failed with status code {response.status_code} and error message: {error_message}." 
+ ) + + +def send_docker_tar( + app_id: str, base_name: str, tar_path: Path, host: str, api_key: str = None +) -> Image: + """ + Sends a Docker tar file to the specified host to build an image for the given app ID and variant name. + + Args: + app_id (str): The ID of the app. + base_name (str): The name of the codebase. + tar_path (Path): The path to the Docker tar file. + host (str): The URL of the host to send the request to. + api_key (str): The API key to use for the request. + + Returns: + Image: The built Docker image. + + Raises: + Exception: If the response status code is 500, indicating that serving the variant failed. + """ + with tar_path.open("rb") as tar_file: + response = requests.post( + f"{host}/{BACKEND_URL_SUFFIX}/containers/build_image/?app_id={app_id}&base_name={base_name}", + files={ + "tar_file": tar_file, + }, + headers={"Authorization": api_key} if api_key is not None else None, + timeout=1200, + ) + + if response.status_code == 500: + response_error = response.json() + error_msg = "Serving the variant failed.\n" + error_msg += f"Log: {response_error}\n" + error_msg += "Here's how you may be able to solve the issue:\n" + error_msg += "- First, make sure that the requirements.txt file has all the dependencies that you need.\n" + error_msg += "- Second, check the Docker logs for the backend image to see the error when running the Docker container." + raise Exception(error_msg) + + response.raise_for_status() + image = Image.parse_obj(response.json()) + return image + + +def save_variant_config( + base_id: str, + config_name: str, + parameters: Dict[str, Any], + overwrite: bool, + host: str, + api_key: Optional[str] = None, +) -> None: + """ + Saves a variant configuration to the Agenta backend. + If the config already exists, it will be overwritten if the overwrite argument is set to True. + If the config does does not exist, a new variant will be created. + + Args: + base_id (str): The ID of the base configuration. 
+ config_name (str): The name of the variant configuration. + parameters (Dict[str, Any]): The parameters of the variant configuration. + overwrite (bool): Whether to overwrite an existing variant configuration with the same name. + host (str): The URL of the Agenta backend. + api_key (Optional[str], optional): The API key to use for authentication. Defaults to None. + + Raises: + ValueError: If the 'host' argument is not specified. + APIRequestError: If the request to the Agenta backend fails. + + Returns: + None + """ + if host is None: + raise ValueError("The 'host' is not specified in save_variant_config") + + variant_config = VariantConfigPayload( + base_id=base_id, + config_name=config_name, + parameters=parameters, + overwrite=overwrite, + ) + try: + response = requests.post( + f"{host}/{BACKEND_URL_SUFFIX}/configs/", + json=variant_config.dict(), + headers={"Authorization": api_key} if api_key is not None else None, + timeout=600, + ) + request = f"POST {host}/{BACKEND_URL_SUFFIX}/configs/ {variant_config.dict()}" + # Check for successful request + if response.status_code != 200: + error_message = response.json().get("detail", "Unknown error") + raise APIRequestError( + f"Request {request} to save_variant_config endpoint failed with status code {response.status_code}. Error message: {error_message}" + ) + except RequestException as e: + raise APIRequestError(f"Request failed: {str(e)}") + + +def fetch_variant_config( + base_id: str, + host: str, + config_name: Optional[str] = None, + environment_name: Optional[str] = None, + api_key: Optional[str] = None, +) -> Dict[str, Any]: + """ + Fetch a variant configuration from the server. + + Args: + base_id (str): ID of the base configuration. + config_name (str): Configuration name. + environment_name (str): Name of the environment. + host (str): The server host URL. + api_key (Optional[str], optional): The API key to use for authentication. Defaults to None. 
+ + Raises: + APIRequestError: If the API request fails. + + Returns: + dict: The requested variant configuration. + """ + + if host is None: + raise ValueError("The 'host' is not specified in fetch_variant_config") + + try: + if environment_name: + endpoint_params = f"?base_id={base_id}&environment_name={environment_name}" + elif config_name: + endpoint_params = f"?base_id={base_id}&config_name={config_name}" + else: + raise ValueError( + "Either 'config_name' or 'environment_name' must be specified in fetch_variant_config" + ) + response = requests.get( + f"{host}/{BACKEND_URL_SUFFIX}/configs/{endpoint_params}", + headers={"Authorization": api_key} if api_key is not None else None, + timeout=600, + ) + + request = f"GET {host}/{BACKEND_URL_SUFFIX}/configs/ {base_id} {config_name} {environment_name}" + + # Check for successful request + if response.status_code != 200: + error_message = response.json().get("detail", "Unknown error") + raise APIRequestError( + f"Request {request} to fetch_variant_config endpoint failed with status code {response.status_code}. Error message: {error_message}" + ) + + return response.json() + + except RequestException as e: + raise APIRequestError(f"Request failed: {str(e)}") + + +def validate_api_key(api_key: str, host: str) -> bool: + """ + Validates an API key with the Agenta backend. + + Args: + api_key (str): The API key to validate. + host (str): The URL of the Agenta backend. + + Returns: + bool: Whether the API key is valid or not. + """ + try: + headers = {"Authorization": api_key} + + prefix = api_key.split(".")[0] + + response = requests.get( + f"{host}/{BACKEND_URL_SUFFIX}/keys/{prefix}/validate/", + headers=headers, + timeout=600, + ) + if response.status_code != 200: + error_message = response.json() + raise APIRequestError( + f"Request to validate api key failed with status code {response.status_code} and error message: {error_message}." 
+ ) + return True + except RequestException as e: + raise APIRequestError(f"An error occurred while making the request: {e}") + + +def retrieve_user_id(host: str, api_key: Optional[str] = None) -> str: + """Retrieve user ID from the server. + + Args: + host (str): The URL of the Agenta backend + api_key (str): The API key to validate with. + + Returns: + str: the user ID + """ + + try: + response = requests.get( + f"{host}/{BACKEND_URL_SUFFIX}/profile/", + headers={"Authorization": api_key} if api_key is not None else None, + timeout=600, + ) + if response.status_code != 200: + error_message = response.json().get("detail", "Unknown error") + raise APIRequestError( + f"Request to fetch_user_profile endpoint failed with status code {response.status_code}. Error message: {error_message}" + ) + return response.json()["id"] + except RequestException as e: + raise APIRequestError(f"Request failed: {str(e)}") + + +from pydantic import BaseModel + + +# def run_evaluation(app_name: str, host: str, api_key: str = None) -> str: +def run_evaluation(app_name: str, host: str, api_key: str = None) -> str: + """Creates new app on the server. + Args: + app_name (str): Name of the app + host (str): Hostname of the server + api_key (str): The API key to use for the request. + """ + + new_evaluation = { + "app_id": "6583e552eb855930ec6b1bdd", + "variant_ids": [ + "6583e552eb855930ec6b1be3", + # "6570aed55d0eaff2293088e6" + ], + "evaluators_configs": ["65856b2b11d53fcce5894ab6"], + "testset_id": "6583e552eb855930ec6b1be4", + } + + response = requests.post( + f"{host}/api/evaluations/", + json=new_evaluation, + # headers={"Authorization": api_key} if api_key is not None else None, + timeout=600, + ) + if response.status_code != 200: + error_message = response.json() + raise APIRequestError( + f"Request to run evaluations failed with status code {response.status_code} and error message: {error_message}." 
+ ) + print(response.json()) + return response.json() diff --git a/agenta-cli/debugging/simple-app/agenta/client/exceptions.py b/agenta-cli/debugging/simple-app/agenta/client/exceptions.py new file mode 100644 index 0000000000..ac6fafe529 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/client/exceptions.py @@ -0,0 +1,2 @@ +class APIRequestError(Exception): + """Exception to be raised when an API request fails.""" diff --git a/agenta-cli/debugging/simple-app/agenta/config.py b/agenta-cli/debugging/simple-app/agenta/config.py new file mode 100644 index 0000000000..4034d7cd86 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/config.py @@ -0,0 +1,25 @@ +try: + from pydantic.v1 import BaseSettings # type: ignore +except ImportError: + from pydantic import BaseSettings # type: ignore + +import os +import toml +from pathlib import Path + +# Load the settings from the .toml file +toml_config = toml.load(f"{Path(__file__).parent}/config.toml") + +# Set the environment variables from the TOML configurations +os.environ["REGISTRY"] = toml_config["registry"] +os.environ["BACKEND_URL_SUFFIX"] = toml_config["backend_url_suffix"] +os.environ["ALLOW_ORIGINS"] = toml_config["allow_origins"] + + +class Settings(BaseSettings): + registry: str + backend_url_suffix: str + allow_origins: str + + +settings = Settings() diff --git a/agenta-cli/debugging/simple-app/agenta/config.toml b/agenta-cli/debugging/simple-app/agenta/config.toml new file mode 100644 index 0000000000..a29287ccea --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/config.toml @@ -0,0 +1,4 @@ +docker_registry_url="127.0.0.1:5001" +registry="agenta-server" +backend_url_suffix="api" +allow_origins="http://localhost:3000,http://localhost:3001,http://cloud.agenta.ai,https://cloud.agenta.ai" \ No newline at end of file diff --git a/agenta-cli/debugging/simple-app/agenta/docker/docker-assets/Dockerfile.cloud.template 
b/agenta-cli/debugging/simple-app/agenta/docker/docker-assets/Dockerfile.cloud.template new file mode 100644 index 0000000000..633521b95c --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/docker/docker-assets/Dockerfile.cloud.template @@ -0,0 +1,9 @@ +FROM public.ecr.aws/h3w6n5z0/agentaai/lambda_templates_public:main + +COPY requirements.txt ${LAMBDA_TASK_ROOT} +RUN pip install --no-cache-dir --disable-pip-version-check -U agenta +RUN pip install --no-cache-dir --disable-pip-version-check -r requirements.txt +RUN pip install --no-cache-dir --disable-pip-version-check mangum +COPY . ${LAMBDA_TASK_ROOT} + +CMD [ "lambda_function.handler" ] diff --git a/agenta-cli/debugging/simple-app/agenta/docker/docker-assets/Dockerfile.template b/agenta-cli/debugging/simple-app/agenta/docker/docker-assets/Dockerfile.template new file mode 100644 index 0000000000..9eb6b06a54 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/docker/docker-assets/Dockerfile.template @@ -0,0 +1,13 @@ +FROM agentaai/templates_v2:main + +WORKDIR /app + +COPY . . + +RUN pip install --no-cache-dir --disable-pip-version-check -U agenta +RUN pip install --no-cache-dir --disable-pip-version-check -r requirements.txt + +EXPOSE 80 + +RUN ["chmod", "+x", "./entrypoint.sh"] +CMD ["./entrypoint.sh"] diff --git a/agenta-cli/debugging/simple-app/agenta/docker/docker-assets/README.md b/agenta-cli/debugging/simple-app/agenta/docker/docker-assets/README.md new file mode 100644 index 0000000000..c448906cba --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/docker/docker-assets/README.md @@ -0,0 +1 @@ +The code here is just used when creating the template to dockerize the app. It is not part of the cli. 
\ No newline at end of file diff --git a/agenta-cli/debugging/simple-app/agenta/docker/docker-assets/entrypoint.sh b/agenta-cli/debugging/simple-app/agenta/docker/docker-assets/entrypoint.sh new file mode 100755 index 0000000000..3c6b353144 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/docker/docker-assets/entrypoint.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env +fi + +exec python main.py diff --git a/agenta-cli/debugging/simple-app/agenta/docker/docker-assets/lambda_function.py b/agenta-cli/debugging/simple-app/agenta/docker/docker-assets/lambda_function.py new file mode 100644 index 0000000000..ca186d6e82 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/docker/docker-assets/lambda_function.py @@ -0,0 +1,6 @@ +import agenta +import _app +from mangum import Mangum + + +handler = Mangum(agenta.app) diff --git a/agenta-cli/debugging/simple-app/agenta/docker/docker-assets/main.py b/agenta-cli/debugging/simple-app/agenta/docker/docker-assets/main.py new file mode 100644 index 0000000000..df78f0d322 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/docker/docker-assets/main.py @@ -0,0 +1,13 @@ +from uvicorn import run +import agenta +import _app # This will register the routes with the FastAPI application +import os + +try: + import ingest +except ImportError: + pass + + +if __name__ == "__main__": + run("agenta:app", host="0.0.0.0", port=80) diff --git a/agenta-cli/debugging/simple-app/agenta/docker/docker_utils.py b/agenta-cli/debugging/simple-app/agenta/docker/docker_utils.py new file mode 100644 index 0000000000..99bfef038c --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/docker/docker_utils.py @@ -0,0 +1,100 @@ +import logging +import shutil +import tarfile +import tempfile +from pathlib import Path +import os + + +logger = logging.getLogger(__name__) +logger.setLevel(logging.DEBUG) + +DEBUG = os.environ.get("AGENTA_CLI_DEBUG", False) + + +def create_dockerfile(out_folder: Path) -> Path: + """Creates 
a dockerfile based on the template in the out_folder. + + Arguments: + out_folder -- Folder in which to create the Dockerfile. + """ + assert Path(out_folder).exists(), f"Folder {out_folder} does not exist." + dockerfile_template = ( + Path(__file__).parent / "docker-assets" / "Dockerfile.template" + ) + dockerfile_path = out_folder / "Dockerfile" + shutil.copy(dockerfile_template, dockerfile_path) + dockerfile_template = ( + Path(__file__).parent / "docker-assets" / "Dockerfile.cloud.template" + ) + dockerfile_path = out_folder / "Dockerfile.cloud" + shutil.copy(dockerfile_template, dockerfile_path) + + return dockerfile_path + + +def build_tar_docker_container(folder: Path, file_name: Path) -> Path: + """Builds the tar file container the files needed for the docker container + + Arguments: + folder -- the path containing the code for the app + file_name -- the file containing the main code of the app + Returns: + the path to the created tar file + """ + tarfile_path = folder / "docker.tar.gz" # output file + if tarfile_path.exists(): + tarfile_path.unlink() + + create_dockerfile(folder) + shutil.copytree(Path(__file__).parent.parent, folder / "agenta", dirs_exist_ok=True) + shutil.copy(Path(__file__).parent / "docker-assets" / "main.py", folder) + shutil.copy(Path(__file__).parent / "docker-assets" / "lambda_function.py", folder) + shutil.copy(Path(__file__).parent / "docker-assets" / "entrypoint.sh", folder) + + # Initialize agentaignore_content with an empty string + agentaignore_content = "" + + # Read the contents of .gitignore file + agentaignore_file_path = folder / ".agentaignore" + if agentaignore_file_path.exists(): + with open(agentaignore_file_path, "r") as agentaignore_file: + agentaignore_content = agentaignore_file.read() + + # Create a temporary directory + with tempfile.TemporaryDirectory() as temp_dir: + temp_path = Path(temp_dir) + + # Clean - remove '/' from every files and folders in the gitignore contents + sanitized_patterns = [ + 
pattern.replace("/", "") for pattern in agentaignore_content.splitlines() + ] + + # Function to ignore files based on the patterns + def ignore_patterns(path, names): + return set(sanitized_patterns) + + # Use a single copytree call with ignore_patterns + shutil.copytree(folder, temp_path, ignore=ignore_patterns, dirs_exist_ok=True) + + # Rename the specified file to _app.py in the temporary directory + shutil.copy(temp_path / file_name, temp_path / "_app.py") + + # Create the tar.gz file + with tarfile.open(tarfile_path, "w:gz") as tar: + tar.add(temp_path, arcname=folder.name) + if not DEBUG: + # Clean up - remove specified files and folders + for item in ["agenta", "main.py", "lambda_function.py", "entrypoint.sh"]: + path = folder / item + if path.exists(): + if path.is_dir(): + shutil.rmtree(path) + else: + path.unlink() + + for dockerfile in folder.glob("Dockerfile*"): + dockerfile.unlink() + + # dockerfile_path.unlink() + return tarfile_path diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/__init__.py b/agenta-cli/debugging/simple-app/agenta/sdk/__init__.py new file mode 100644 index 0000000000..c1e40757c4 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/sdk/__init__.py @@ -0,0 +1,70 @@ +from typing import Optional, Callable, Any + +from .utils.preinit import PreInitObject # always the first import! 
+ +import agenta.client.backend.types as client_types # pylint: disable=wrong-import-order + +from .types import ( + DictInput, + MultipleChoice, + FloatParam, + InFile, + IntParam, + MultipleChoiceParam, + GroupedMultipleChoiceParam, + TextParam, + MessagesInput, + FileInputURL, + BinaryParam, + Prompt, + AgentaNodeDto, + AgentaNodesResponse, +) + +from .tracing import Tracing, get_tracer +from .decorators.tracing import instrument +from .tracing.conventions import Reference +from .decorators.routing import entrypoint, app, route +from .agenta_init import Config, AgentaSingleton, init as _init +from .utils.costs import calculate_token_usage +from .managers.config import ConfigManager +from .managers.variant import VariantManager +from .managers.deployment import DeploymentManager + +config = PreInitObject("agenta.config", Config) +DEFAULT_AGENTA_SINGLETON_INSTANCE = AgentaSingleton() + +types = client_types + +api = None +async_api = None + +tracing = DEFAULT_AGENTA_SINGLETON_INSTANCE.tracing # type: ignore +tracer = get_tracer(tracing) + + +def init( + host: Optional[str] = None, + api_key: Optional[str] = None, + config_fname: Optional[str] = None, + redact: Optional[Callable[..., Any]] = None, + redact_on_error: Optional[bool] = True, + # DEPRECATING + app_id: Optional[str] = None, +): + global api, async_api, tracing, tracer # pylint: disable=global-statement + + _init( + host=host, + api_key=api_key, + config_fname=config_fname, + redact=redact, + redact_on_error=redact_on_error, + app_id=app_id, + ) + + api = DEFAULT_AGENTA_SINGLETON_INSTANCE.api # type: ignore + async_api = DEFAULT_AGENTA_SINGLETON_INSTANCE.async_api # type: ignore + + tracing = DEFAULT_AGENTA_SINGLETON_INSTANCE.tracing # type: ignore + tracer = get_tracer(tracing) diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/agenta_init.py b/agenta-cli/debugging/simple-app/agenta/sdk/agenta_init.py new file mode 100644 index 0000000000..db0a27580a --- /dev/null +++ 
b/agenta-cli/debugging/simple-app/agenta/sdk/agenta_init.py @@ -0,0 +1,303 @@ +import toml +from os import getenv +from typing import Optional, Callable, Any +from importlib.metadata import version + +from agenta.sdk.utils.logging import log +from agenta.sdk.utils.globals import set_global +from agenta.client.backend.client import AgentaApi, AsyncAgentaApi +from agenta.sdk.tracing import Tracing +from agenta.client.exceptions import APIRequestError + +print(".DS_Store") + + +class AgentaSingleton: + """Singleton class to save all the "global variables" for the sdk.""" + + _instance = None + config = None + tracing = None + + api = None + async_api = None + + def __new__(cls): + if not cls._instance: + cls._instance = super(AgentaSingleton, cls).__new__(cls) + return cls._instance + + def init( + self, + *, + host: Optional[str] = None, + api_key: Optional[str] = None, + config_fname: Optional[str] = None, + redact: Optional[Callable[..., Any]] = None, + redact_on_error: Optional[bool] = True, + # DEPRECATING + app_id: Optional[str] = None, + ) -> None: + """ + Main function to initialize the singleton. + + Initializes the singleton with the given `app_id`, `host`, and `api_key`. The order of precedence for these variables is: + 1. Explicit argument provided in the function call. + 2. Value from the configuration file specified by `config_fname`. + 3. Environment variables. + + Examples: + ag.init(app_id="xxxx", api_key="xxx") + ag.init(config_fname="config.toml") + ag.init() #assuming env vars are set + + Args: + app_id (Optional[str]): ID of the Agenta application. Defaults to None. If not provided, will look for "app_id" in the config file, then "AGENTA_APP_ID" in environment variables. + host (Optional[str]): Host name of the backend server. Defaults to None. If not provided, will look for "backend_host" in the config file, then "AGENTA_HOST" in environment variables. + api_key (Optional[str]): API Key to use with the host of the backend server. 
Defaults to None. If not provided, will look for "api_key" in the config file, then "AGENTA_API_KEY" in environment variables. + config_fname (Optional[str]): Path to the configuration file (relative or absolute). Defaults to None. + + Raises: + ValueError: If `app_id` is not specified either as an argument, in the config file, or in the environment variables. + """ + + log.info("---------------------------") + log.info("Agenta SDK - using version: %s", version("agenta")) + log.info("---------------------------") + + config = {} + if config_fname: + config = toml.load(config_fname) + + self.host = ( + host + or getenv("AGENTA_HOST") + or config.get("backend_host") + or config.get("host") + or "https://cloud.agenta.ai" + ) + + self.app_id = app_id or config.get("app_id") or getenv("AGENTA_APP_ID") + # if not self.app_id: + # raise ValueError( + # "App ID must be specified. You can provide it in one of the following ways:\n" + # "1. As an argument when calling ag.init(app_id='your_app_id').\n" + # "2. In the configuration file specified by config_fname.\n" + # "3. As an environment variable 'AGENTA_APP_ID'." 
+ # ) + + self.api_key = api_key or getenv("AGENTA_API_KEY") or config.get("api_key") + + self.tracing = Tracing( + url=f"{self.host}/api/observability/v1/otlp/traces", # type: ignore + redact=redact, + redact_on_error=redact_on_error, + ) + + self.tracing.configure( + api_key=self.api_key, + # DEPRECATING + app_id=self.app_id, + ) + + self.api = AgentaApi( + base_url=self.host + "/api", + api_key=self.api_key if self.api_key else "", + ) + + self.async_api = AsyncAgentaApi( + base_url=self.host + "/api", + api_key=self.api_key if self.api_key else "", + ) + + self.base_id = getenv("AGENTA_BASE_ID") + + self.config = Config( + host=self.host, + base_id=self.base_id, + api_key=self.api_key, + ) + + +class Config: + def __init__( + self, + host: str, + base_id: Optional[str] = None, + api_key: Optional[str] = "", + ): + self.host = host + + self.base_id = base_id + + if self.base_id is None: + # print( + # "Warning: Your configuration will not be saved permanently since base_id is not provided.\n" + # ) + pass + + if base_id is None or host is None: + self.persist = False + else: + self.persist = True + self.client = AgentaApi( + base_url=self.host + "/api", + api_key=api_key if api_key else "", + ) + + def register_default(self, overwrite=False, **kwargs): + """alias for default""" + return self.default(overwrite=overwrite, **kwargs) + + def default(self, overwrite=False, **kwargs): + """Saves the default parameters to the app_name and base_name in case they are not already saved. + Args: + overwrite: Whether to overwrite the existing configuration or not + **kwargs: A dict containing the parameters + """ + self.set( + **kwargs + ) # In case there is no connectivity, we still can use the default values + try: + self.push(config_name="default", overwrite=overwrite, **kwargs) + except Exception as ex: + log.warning( + "Unable to push the default configuration to the server. 
%s", str(ex) + ) + + def push(self, config_name: str, overwrite=True, **kwargs): + """Pushes the parameters for the app variant to the server + Args: + config_name: Name of the configuration to push to + overwrite: Whether to overwrite the existing configuration or not + **kwargs: A dict containing the parameters + """ + if not self.persist: + return + try: + self.client.configs.save_config( + base_id=self.base_id, + config_name=config_name, + parameters=kwargs, + overwrite=overwrite, + ) + except Exception as ex: + log.warning( + "Failed to push the configuration to the server with error: %s", ex + ) + + def pull( + self, config_name: str = "default", environment_name: Optional[str] = None + ): + """Pulls the parameters for the app variant from the server and sets them to the config""" + if not self.persist and ( + config_name != "default" or environment_name is not None + ): + raise ValueError( + "Cannot pull the configuration from the server since the app_name and base_name are not provided." 
+ ) + if self.persist: + try: + if environment_name: + config = self.client.configs.get_config( + base_id=self.base_id, environment_name=environment_name + ) + + else: + config = self.client.configs.get_config( + base_id=self.base_id, + config_name=config_name, + ) + except Exception as ex: + log.warning( + "Failed to pull the configuration from the server with error: %s", + str(ex), + ) + try: + self.set(**{"current_version": config.current_version, **config.parameters}) + except Exception as ex: + log.warning("Failed to set the configuration with error: %s", str(ex)) + + def all(self): + """Returns all the parameters for the app variant""" + return { + k: v + for k, v in self.__dict__.items() + if k + not in [ + "app_name", + "base_name", + "host", + "base_id", + "api_key", + "persist", + "client", + ] + } + + # function to set the parameters for the app variant + def set(self, **kwargs): + """Sets the parameters for the app variant + + Args: + **kwargs: A dict containing the parameters + """ + for key, value in kwargs.items(): + setattr(self, key, value) + + def dump(self): + """Returns all the information about the current version in the configuration. + + Raises: + NotImplementedError: _description_ + """ + + raise NotImplementedError() + + +def init( + host: Optional[str] = None, + api_key: Optional[str] = None, + config_fname: Optional[str] = None, + redact: Optional[Callable[..., Any]] = None, + redact_on_error: Optional[bool] = True, + # DEPRECATING + app_id: Optional[str] = None, +): + """Main function to initialize the agenta sdk. + + Initializes agenta with the given `app_id`, `host`, and `api_key`. The order of precedence for these variables is: + 1. Explicit argument provided in the function call. + 2. Value from the configuration file specified by `config_fname`. + 3. Environment variables. 
+ + - `app_id` is a required parameter (to be specified in one of the above ways) + - `host` is optional and defaults to "https://cloud.agenta.ai" + - `api_key` is optional and defaults to "". It is required only when using cloud or enterprise version of agenta. + + + Args: + app_id (Optional[str]): ID of the Agenta application. Defaults to None. If not provided, will look for "app_id" in the config file, then "AGENTA_APP_ID" in environment variables. + host (Optional[str]): Host name of the backend server. Defaults to None. If not provided, will look for "backend_host" in the config file, then "AGENTA_HOST" in environment variables. + api_key (Optional[str]): API Key to use with the host of the backend server. Defaults to None. If not provided, will look for "api_key" in the config file, then "AGENTA_API_KEY" in environment variables. + config_fname (Optional[str]): Path to the configuration file. Defaults to None. + + Raises: + ValueError: If `app_id` is not specified either as an argument, in the config file, or in the environment variables. 
+ """ + + singleton = AgentaSingleton() + + singleton.init( + host=host, + api_key=api_key, + config_fname=config_fname, + redact=redact, + redact_on_error=redact_on_error, + app_id=app_id, + ) + + set_global( + config=singleton.config, + tracing=singleton.tracing, + ) diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/assets.py b/agenta-cli/debugging/simple-app/agenta/sdk/assets.py new file mode 100644 index 0000000000..c62cc9dd97 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/sdk/assets.py @@ -0,0 +1,84 @@ +supported_llm_models = { + "Mistral AI": [ + "mistral/mistral-tiny", + "mistral/mistral-small", + "mistral/mistral-medium", + "mistral/mistral-large-latest", + ], + "Open AI": [ + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo", + "gpt-4", + "gpt-4o", + "gpt-4o-mini", + "gpt-4-1106-preview", + ], + "Gemini": ["gemini/gemini-1.5-pro-latest", "gemini/gemini-1.5-flash"], + "Cohere": [ + "cohere/command-light", + "cohere/command-r-plus", + "cohere/command-nightly", + ], + "Anthropic": [ + "anthropic/claude-3-5-sonnet-20240620", + "anthropic/claude-3-opus-20240229", + "anthropic/claude-3-sonnet-20240229", + "anthropic/claude-3-haiku-20240307", + "anthropic/claude-2.1", + "anthropic/claude-2", + "anthropic/claude-instant-1.2", + "anthropic/claude-instant-1", + ], + "Anyscale": [ + "anyscale/meta-llama/Llama-2-13b-chat-hf", + "anyscale/meta-llama/Llama-2-70b-chat-hf", + ], + "Perplexity AI": [ + "perplexity/pplx-7b-chat", + "perplexity/pplx-70b-chat", + "perplexity/pplx-7b-online", + "perplexity/pplx-70b-online", + ], + "DeepInfra": [ + "deepinfra/meta-llama/Llama-2-70b-chat-hf", + "deepinfra/meta-llama/Llama-2-13b-chat-hf", + "deepinfra/codellama/CodeLlama-34b-Instruct-hf", + "deepinfra/mistralai/Mistral-7B-Instruct-v0.1", + "deepinfra/jondurbin/airoboros-l2-70b-gpt4-1.4.1", + ], + "Together AI": [ + "together_ai/togethercomputer/llama-2-70b-chat", + "together_ai/togethercomputer/llama-2-70b", + "together_ai/togethercomputer/LLaMA-2-7B-32K", + 
"together_ai/togethercomputer/Llama-2-7B-32K-Instruct", + "together_ai/togethercomputer/llama-2-7b", + "together_ai/togethercomputer/alpaca-7b", + "together_ai/togethercomputer/CodeLlama-34b-Instruct", + "together_ai/togethercomputer/CodeLlama-34b-Python", + "together_ai/WizardLM/WizardCoder-Python-34B-V1.0", + "together_ai/NousResearch/Nous-Hermes-Llama2-13b", + "together_ai/Austism/chronos-hermes-13b", + ], + "Aleph Alpha": [ + "luminous-base", + "luminous-base-control", + "luminous-extended-control", + "luminous-supreme", + ], + "OpenRouter": [ + "openrouter/openai/gpt-3.5-turbo", + "openrouter/openai/gpt-3.5-turbo-16k", + "openrouter/anthropic/claude-instant-v1", + "openrouter/google/palm-2-chat-bison", + "openrouter/google/palm-2-codechat-bison", + "openrouter/meta-llama/llama-2-13b-chat", + "openrouter/meta-llama/llama-2-70b-chat", + ], + "Groq": [ + "groq/llama3-8b-8192", + "groq/llama3-70b-8192", + "groq/llama2-70b-4096", + "groq/mixtral-8x7b-32768", + "groq/gemma-7b-it", + ], +} diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/client.py b/agenta-cli/debugging/simple-app/agenta/sdk/client.py new file mode 100644 index 0000000000..ee94ced567 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/sdk/client.py @@ -0,0 +1,56 @@ +import os + +from cachetools import TTLCache, cached + +from agenta.client.backend.client import AgentaApi + + +class Agenta: + """Client class for interacting with the Agenta API.""" + + def __init__(self, api_key: str = None, host: str = None): + """ + Initializes the Agenta client with API key and host. + + Raises: + EnvironmentError: If AGENTA_API_KEY is not set. + """ + if not api_key and not os.environ.get("AGENTA_API_KEY"): + raise EnvironmentError( + "Required environment variables AGENTA_API_KEY is not set." 
+ ) + self.api_key = api_key if api_key else os.environ.get("AGENTA_API_KEY") + self.host = ( + host if host else os.environ.get("AGENTA_HOST", "https://cloud.agenta.ai") + ) + self.cache = TTLCache(maxsize=1024, ttl=300) + backend_url = f"{self.host}/api" + self.client = AgentaApi(base_url=backend_url, api_key=self.api_key) + + def get_config(self, base_id: str, environment: str, cache_timeout: int = 300): + """ + Fetches and caches the configuration for a specified base ID and environment. + + Args: + base_id (str): The unique identifier for the base. + environment (str): The environment name (e.g., 'production', 'development'). + cache_timeout (int): The TTL for the cache in seconds. Defaults to 300 seconds. + + Returns: + dict: The configuration data retrieved from the Agenta API. + + Raises: + EnvironmentError: If the required AGENTA_API_KEY is not set in the environment variables. + """ + if cache_timeout != self.cache.ttl: + self.cache = TTLCache( + maxsize=1024, ttl=cache_timeout + ) # TODO: We need to modify this to use a dynamic TTLCache implementation in the future + + @cached(cache=self.cache) + def fetch_config(base_id: str, environment: str = "production"): + return self.client.configs.get_config( + base_id=base_id, environment_name=environment + ) + + return fetch_config(base_id, environment) diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/context/__init__.py b/agenta-cli/debugging/simple-app/agenta/sdk/context/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/context/routing.py b/agenta-cli/debugging/simple-app/agenta/sdk/context/routing.py new file mode 100644 index 0000000000..1d716a69ec --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/sdk/context/routing.py @@ -0,0 +1,26 @@ +from contextlib import contextmanager +from contextvars import ContextVar +from typing import Any, Dict, Optional + +routing_context = ContextVar("routing_context", default={}) + + 
+@contextmanager +def routing_context_manager( + *, + config: Optional[Dict[str, Any]] = None, + application: Optional[Dict[str, Any]] = None, + variant: Optional[Dict[str, Any]] = None, + environment: Optional[Dict[str, Any]] = None, +): + context = { + "config": config, + "application": application, + "variant": variant, + "environment": environment, + } + token = routing_context.set(context) + try: + yield + finally: + routing_context.reset(token) diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/context/tracing.py b/agenta-cli/debugging/simple-app/agenta/sdk/context/tracing.py new file mode 100644 index 0000000000..0585a014ad --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/sdk/context/tracing.py @@ -0,0 +1,3 @@ +from contextvars import ContextVar + +tracing_context = ContextVar("tracing_context", default={}) diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/decorators/__init__.py b/agenta-cli/debugging/simple-app/agenta/sdk/decorators/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/decorators/routing.py b/agenta-cli/debugging/simple-app/agenta/sdk/decorators/routing.py new file mode 100644 index 0000000000..74a043989e --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/sdk/decorators/routing.py @@ -0,0 +1,977 @@ +from typing import Type, Any, Callable, Dict, Optional, Tuple, List +from annotated_types import Ge, Le, Gt, Lt +from pydantic import BaseModel, HttpUrl, ValidationError +from json import dumps +from inspect import signature, iscoroutinefunction, Signature, Parameter, _empty +from argparse import ArgumentParser +from functools import wraps +from asyncio import sleep, get_event_loop +from traceback import format_exc, format_exception +from pathlib import Path +from tempfile import NamedTemporaryFile +from os import environ + +from fastapi.middleware.cors import CORSMiddleware +from fastapi import Body, FastAPI, UploadFile, HTTPException + +from 
agenta.sdk.middleware.auth import AuthorizationMiddleware +from agenta.sdk.context.routing import routing_context_manager, routing_context +from agenta.sdk.context.tracing import tracing_context +from agenta.sdk.router import router +from agenta.sdk.utils import helpers +from agenta.sdk.utils.exceptions import suppress +from agenta.sdk.utils.logging import log +from agenta.sdk.types import ( + DictInput, + FloatParam, + InFile, + IntParam, + MultipleChoiceParam, + MultipleChoice, + GroupedMultipleChoiceParam, + TextParam, + MessagesInput, + FileInputURL, + BaseResponse, + BinaryParam, +) + +import agenta as ag + + +AGENTA_USE_CORS = str(environ.get("AGENTA_USE_CORS", "true")).lower() in ( + "true", + "1", + "t", +) + +app = FastAPI() +log.setLevel("DEBUG") + + +_MIDDLEWARES = True + + +app.include_router(router, prefix="") + + +class PathValidator(BaseModel): + url: HttpUrl + + +class route: + # This decorator is used to expose specific stages of a workflow (embedding, retrieval, summarization, etc.) + # as independent endpoints. It is designed for backward compatibility with existing code that uses + # the @entrypoint decorator, which has certain limitations. By using @route(), we can create new + # routes without altering the main workflow entrypoint. This helps in modularizing the services + # and provides flexibility in how we expose different functionalities as APIs. + def __init__(self, path, config_schema: BaseModel): + self.config_schema: BaseModel = config_schema + path = "/" + path.strip("/").strip() + path = "" if path == "/" else path + PathValidator(url=f"http://example.com{path}") + + self.route_path = path + + def __call__(self, f): + self.e = entrypoint( + f, route_path=self.route_path, config_schema=self.config_schema + ) + + return f + + +class entrypoint: + """ + Decorator class to wrap a function for HTTP POST, terminal exposure and enable tracing. 
+ + This decorator generates the following endpoints: + + Playground Endpoints + - /generate with @entrypoint, @route("/"), @route(path="") # LEGACY + - /playground/run with @entrypoint, @route("/"), @route(path="") + - /playground/run/{route} with @route({route}), @route(path={route}) + + Deployed Endpoints: + - /generate_deployed with @entrypoint, @route("/"), @route(path="") # LEGACY + - /run with @entrypoint, @route("/"), @route(path="") + - /run/{route} with @route({route}), @route(path={route}) + + The rationale is: + - There may be multiple endpoints, based on the different routes. + - It's better to make it explicit that an endpoint is for the playground. + - Prefixing the routes with /run is more futureproof in case we add more endpoints. + + Example: + ```python + import agenta as ag + + @ag.entrypoint + async def chain_of_prompts_llm(prompt: str): + return ... + ``` + """ + + routes = list() + + def __init__( + self, + func: Callable[..., Any], + route_path="", + config_schema: Optional[BaseModel] = None, + ): + ### --- Update Middleware --- # + try: + global _MIDDLEWARES # pylint: disable=global-statement + + if _MIDDLEWARES: + app.add_middleware( + AuthorizationMiddleware, + host=ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.host, + resource_id=ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.app_id, + resource_type="application", + ) + + if AGENTA_USE_CORS: + app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_methods=["*"], + allow_headers=["*"], + allow_credentials=True, + ) + + _MIDDLEWARES = False + + except: # pylint: disable=bare-except + log.warning("Agenta SDK - failed to secure route: %s", route_path) + ### --- Update Middleware --- # + + DEFAULT_PATH = "generate" + PLAYGROUND_PATH = "/playground" + RUN_PATH = "/run" + func_signature = signature(func) + try: + config = ( + config_schema() if config_schema else None + ) # we initialize the config object to be able to use it + except ValidationError as e: + raise ValueError( + f"Error initializing 
config_schema. Please ensure all required fields have default values: {str(e)}" + ) from e + except Exception as e: + raise ValueError( + f"Unexpected error initializing config_schema: {str(e)}" + ) from e + + config_params = config.dict() if config else ag.config.all() + ingestible_files = self.extract_ingestible_files(func_signature) + + self.route_path = route_path + + ### --- Playground --- # + @wraps(func) + async def wrapper(*args, **kwargs) -> Any: + func_params, api_config_params = self.split_kwargs(kwargs, config_params) + self.ingest_files(func_params, ingestible_files) + + with routing_context_manager( + config=api_config_params, + ): + entrypoint_result = await self.execute_function( + func, + True, # inline trace: True + *args, + params=func_params, + config_params=config_params, + ) + + return entrypoint_result + + self.update_function_signature( + wrapper=wrapper, + func_signature=func_signature, + config_class=config, + config_dict=config_params, + ingestible_files=ingestible_files, + ) + + # + if route_path == "": + route = f"/{DEFAULT_PATH}" + app.post(route, response_model=BaseResponse)(wrapper) + entrypoint.routes.append( + { + "func": func.__name__, + "endpoint": route, + "params": ( + {**config_params, **func_signature.parameters} + if not config + else func_signature.parameters + ), + "config": config, + } + ) + + route = f"{PLAYGROUND_PATH}{RUN_PATH}{route_path}" + app.post(route, response_model=BaseResponse)(wrapper) + entrypoint.routes.append( + { + "func": func.__name__, + "endpoint": route, + "params": ( + {**config_params, **func_signature.parameters} + if not config + else func_signature.parameters + ), + "config": config, + } + ) + ### ---------------------------- # + + ### --- Deployed --- # + @wraps(func) + async def wrapper_deployed(*args, **kwargs) -> Any: + func_params = { + k: v + for k, v in kwargs.items() + if k not in ["config", "environment", "app"] + } + if not config_schema: + if "environment" in kwargs and 
kwargs["environment"] is not None: + ag.config.pull(environment_name=kwargs["environment"]) + elif "config" in kwargs and kwargs["config"] is not None: + ag.config.pull(config_name=kwargs["config"]) + else: + ag.config.pull(config_name="default") + + app_id = environ.get("AGENTA_APP_ID") + + with routing_context_manager( + application={ + "id": app_id, + "slug": kwargs.get("app"), + }, + variant={ + "slug": kwargs.get("config"), + }, + environment={ + "slug": kwargs.get("environment"), + }, + ): + entrypoint_result = await self.execute_function( + func, + False, # inline trace: False + *args, + params=func_params, + config_params=config_params, + ) + + return entrypoint_result + + self.update_deployed_function_signature( + wrapper_deployed, + func_signature, + ingestible_files, + ) + if route_path == "": + route_deployed = f"/{DEFAULT_PATH}_deployed" + app.post(route_deployed, response_model=BaseResponse)(wrapper_deployed) + + route_deployed = f"{RUN_PATH}{route_path}" + app.post(route_deployed, response_model=BaseResponse)(wrapper_deployed) + ### ---------------- # + + ### --- Update OpenAPI --- # + app.openapi_schema = None # Forces FastAPI to re-generate the schema + openapi_schema = app.openapi() + + # Inject the current version of the SDK into the openapi_schema + openapi_schema["agenta_sdk"] = {"version": helpers.get_current_version()} + + for route in entrypoint.routes: + self.override_schema( + openapi_schema=openapi_schema, + func_name=route["func"], + endpoint=route["endpoint"], + params=route["params"], + ) + if route["config"] is not None: # new SDK version + # self.override_config_in_schema( + # openapi_schema=openapi_schema, + # func_name=route["func"], + # endpoint=route["endpoint"], + # config=route["config"], + # ) + pass + + if self.is_main_script(func) and route_path == "": + self.handle_terminal_run( + func, + func_signature.parameters, # type: ignore + config_params, + ingestible_files, + ) + + def extract_ingestible_files( + self, + 
func_signature: Signature, + ) -> Dict[str, Parameter]: + """Extract parameters annotated as InFile from function signature.""" + + return { + name: param + for name, param in func_signature.parameters.items() + if param.annotation is InFile + } + + def split_kwargs( + self, kwargs: Dict[str, Any], config_params: Dict[str, Any] + ) -> Tuple[Dict[str, Any], Dict[str, Any]]: + """Split keyword arguments into function parameters and API configuration parameters.""" + + func_params = {k: v for k, v in kwargs.items() if k not in config_params} + api_config_params = {k: v for k, v in kwargs.items() if k in config_params} + return func_params, api_config_params + + def ingest_file(self, upfile: UploadFile): + temp_file = NamedTemporaryFile(delete=False) + temp_file.write(upfile.file.read()) + temp_file.close() + return InFile(file_name=upfile.filename, file_path=temp_file.name) + + def ingest_files( + self, + func_params: Dict[str, Any], + ingestible_files: Dict[str, Parameter], + ) -> None: + """Ingest files specified in function parameters.""" + + for name in ingestible_files: + if name in func_params and func_params[name] is not None: + func_params[name] = self.ingest_file(func_params[name]) + + async def execute_function( + self, + func: Callable[..., Any], + inline_trace, + *args, + **func_params, + ): + log.info("Agenta SDK - handling route: %s", repr(self.route_path or "/")) + + tracing_context.set(routing_context.get()) + + try: + result = ( + await func(*args, **func_params["params"]) + if iscoroutinefunction(func) + else func(*args, **func_params["params"]) + ) + + return await self.handle_success(result, inline_trace) + + except Exception as error: + self.handle_failure(error) + + async def handle_success(self, result: Any, inline_trace: bool): + data = None + tree = None + + with suppress(): + data = self.patch_result(result) + + if inline_trace: + tree = await self.fetch_inline_trace(inline_trace) + + log.info(f"----------------------------------") + 
log.info(f"Agenta SDK - exiting with success: 200") + log.info(f"----------------------------------") + + return BaseResponse(data=data, tree=tree) + + def handle_failure(self, error: Exception): + log.warning("--------------------------------------------------") + log.warning("Agenta SDK - handling application exception below:") + log.warning("--------------------------------------------------") + log.warning(format_exc().strip("\n")) + log.warning("--------------------------------------------------") + + status_code = 500 + message = str(error) + stacktrace = format_exception(error, value=error, tb=error.__traceback__) # type: ignore + detail = {"message": message, "stacktrace": stacktrace} + + raise HTTPException(status_code=status_code, detail=detail) + + def patch_result(self, result: Any): + """ + Patch the result to only include the message if the result is a FuncResponse-style dictionary with message, cost, and usage keys. + + Example: + ```python + result = { + "message": "Hello, world!", + "cost": 0.5, + "usage": { + "prompt_tokens": 10, + "completion_tokens": 20, + "total_tokens": 30 + } + } + result = patch_result(result) + print(result) + # Output: "Hello, world!" + ``` + """ + data = ( + result["message"] + if isinstance(result, dict) + and all(key in result for key in ["message", "cost", "usage"]) + else result + ) + + if data is None: + data = ( + "Function executed successfully, but did return None. 
\n Are you sure you did not forget to return a value?", + ) + + if not isinstance(result, dict): + data = str(data) + + return data + + async def fetch_inline_trace(self, inline_trace): + WAIT_FOR_SPANS = True + TIMEOUT = 1 + TIMESTEP = 0.1 + FINALSTEP = 0.001 + NOFSTEPS = TIMEOUT / TIMESTEP + + trace = None + + root_context: Dict[str, Any] = tracing_context.get().get("root") + + trace_id = root_context.get("trace_id") if root_context else None + + if trace_id is not None: + if inline_trace: + if WAIT_FOR_SPANS: + remaining_steps = NOFSTEPS + + while ( + not ag.tracing.is_inline_trace_ready(trace_id) + and remaining_steps > 0 + ): + await sleep(TIMESTEP) + + remaining_steps -= 1 + + await sleep(FINALSTEP) + + trace = ag.tracing.get_inline_trace(trace_id) + else: + trace = {"trace_id": trace_id} + + return trace + + def update_wrapper_signature( + self, wrapper: Callable[..., Any], updated_params: List + ): + """ + Updates the signature of a wrapper function with a new list of parameters. + + Args: + wrapper (callable): A callable object, such as a function or a method, that requires a signature update. + updated_params (List[Parameter]): A list of `Parameter` objects representing the updated parameters + for the wrapper function. 
+ """ + + wrapper_signature = signature(wrapper) + wrapper_signature = wrapper_signature.replace(parameters=updated_params) + wrapper.__signature__ = wrapper_signature # type: ignore + + def update_function_signature( + self, + wrapper: Callable[..., Any], + func_signature: Signature, + config_class: Type[BaseModel], # TODO: change to our type + config_dict: Dict[str, Any], + ingestible_files: Dict[str, Parameter], + ) -> None: + """Update the function signature to include new parameters.""" + + updated_params: List[Parameter] = [] + updated_params.append( + Parameter( + name="agenta_config", + kind=Parameter.KEYWORD_ONLY, + annotation=config_class, + default=Body(...), + ) + ) + + self.add_func_params_to_parser(updated_params, func_signature, ingestible_files) + self.update_wrapper_signature(wrapper, updated_params) + + def update_deployed_function_signature( + self, + wrapper: Callable[..., Any], + func_signature: Signature, + ingestible_files: Dict[str, Parameter], + ) -> None: + """Update the function signature to include new parameters.""" + + updated_params: List[Parameter] = [] + self.add_func_params_to_parser(updated_params, func_signature, ingestible_files) + for param in [ + "config", + "environment", + ]: # we add the config and environment parameters + updated_params.append( + Parameter( + name=param, + kind=Parameter.KEYWORD_ONLY, + default=Body(None), + annotation=str, + ) + ) + self.update_wrapper_signature(wrapper, updated_params) + + def add_config_params_to_parser( + self, updated_params: list, config_class: Type[BaseModel] + ) -> None: + """Add configuration parameters to function signature.""" + for name, field in config_class.__fields__.items(): + assert field.default is not None, f"Field {name} has no default value" + updated_params.append( + Parameter( + name=name, + kind=Parameter.KEYWORD_ONLY, + annotation=field.annotation.__name__, + default=Body(field.default), + ) + ) + + def deprecated_add_config_params_to_parser( + self, 
updated_params: list, config_dict: Dict[str, Any] + ) -> None: + """Add configuration parameters to function signature.""" + for name, param in config_dict.items(): + assert ( + len(param.__class__.__bases__) == 1 + ), f"Inherited standard type of {param.__class__} needs to be one." + updated_params.append( + Parameter( + name=name, + kind=Parameter.KEYWORD_ONLY, + default=Body(param), + annotation=param.__class__.__bases__[ + 0 + ], # determines and get the base (parent/inheritance) type of the sdk-type at run-time. \ + # E.g __class__ is ag.MessagesInput() and accessing it parent type will return (,), \ + # thus, why we are accessing the first item. + ) + ) + + def add_func_params_to_parser( + self, + updated_params: list, + func_signature: Signature, + ingestible_files: Dict[str, Parameter], + ) -> None: + """Add function parameters to function signature.""" + for name, param in func_signature.parameters.items(): + if name in ingestible_files: + updated_params.append( + Parameter(name, param.kind, annotation=UploadFile) + ) + else: + assert ( + len(param.default.__class__.__bases__) == 1 + ), f"Inherited standard type of {param.default.__class__} needs to be one." + updated_params.append( + Parameter( + name, + Parameter.KEYWORD_ONLY, + default=Body(..., embed=True), + annotation=param.default.__class__.__bases__[ + 0 + ], # determines and get the base (parent/inheritance) type of the sdk-type at run-time. \ + # E.g __class__ is ag.MessagesInput() and accessing it parent type will return (,), \ + # thus, why we are accessing the first item. + ) + ) + + def is_main_script(self, func: Callable) -> bool: + """ + Check if the script containing the function is the main script being run. + + Args: + func (Callable): The function object to check. + + Returns: + bool: True if the script containing the function is the main script, False otherwise. 
+ + Example: + if is_main_script(my_function): + print("This is the main script.") + """ + return func.__module__ == "__main__" + + def handle_terminal_run( + self, + func: Callable, + func_params: Dict[str, Parameter], + config_params: Dict[str, Any], + ingestible_files: Dict, + ): + """ + Parses command line arguments and sets configuration when script is run from the terminal. + + Args: + func_params (dict): A dictionary containing the function parameters and their annotations. + config_params (dict): A dictionary containing the configuration parameters. + ingestible_files (dict): A dictionary containing the files that should be ingested. + """ + + # For required parameters, we add them as arguments + parser = ArgumentParser() + for name, param in func_params.items(): + if name in ingestible_files: + parser.add_argument(name, type=str) + else: + parser.add_argument(name, type=param.annotation) + + for name, param in config_params.items(): + if type(param) is MultipleChoiceParam: + parser.add_argument( + f"--{name}", + type=str, + default=param.default, + choices=param.choices, # type: ignore + ) + else: + parser.add_argument( + f"--{name}", + type=type(param), + default=param, + ) + + args = parser.parse_args() + + # split the arg list into the arg in the app_param and + # the args from the sig.parameter + args_config_params = {k: v for k, v in vars(args).items() if k in config_params} + args_func_params = { + k: v for k, v in vars(args).items() if k not in config_params + } + for name in ingestible_files: + args_func_params[name] = InFile( + file_name=Path(args_func_params[name]).stem, + file_path=args_func_params[name], + ) + + # Update args_config_params with default values from config_params if not provided in command line arguments + args_config_params.update( + { + key: value + for key, value in config_params.items() + if key not in args_config_params + } + ) + + loop = get_event_loop() + + with routing_context_manager(config=args_config_params): + result 
= loop.run_until_complete( + self.execute_function( + func, + True, # inline trace: True + **{"params": args_func_params, "config_params": args_config_params}, + ) + ) + + if result.trace: + log.info("\n========= Result =========\n") + + log.info(f"trace_id: {result.trace['trace_id']}") + log.info(f"latency: {result.trace.get('latency')}") + log.info(f"cost: {result.trace.get('cost')}") + log.info(f"usage: {list(result.trace.get('usage', {}).values())}") + + log.info(" ") + log.info("data:") + log.info(dumps(result.data, indent=2)) + + log.info(" ") + log.info("trace:") + log.info("----------------") + log.info(dumps(result.trace.get("spans", []), indent=2)) + log.info("----------------") + + log.info("\n==========================\n") + + def override_config_in_schema( + self, + openapi_schema: dict, + func_name: str, + endpoint: str, + config: Type[BaseModel], + ): + endpoint = endpoint[1:].replace("/", "_") + schema_to_override = openapi_schema["components"]["schemas"][ + f"Body_{func_name}_{endpoint}_post" + ]["properties"] + # New logic + for param_name, param_val in config.__fields__.items(): + if param_val.annotation is str: + if any( + isinstance(constraint, MultipleChoice) + for constraint in param_val.metadata + ): + choices = next( + constraint.choices + for constraint in param_val.metadata + if isinstance(constraint, MultipleChoice) + ) + if isinstance(choices, dict): + schema_to_override[param_name]["x-parameter"] = "grouped_choice" + schema_to_override[param_name]["choices"] = choices + elif isinstance(choices, list): + schema_to_override[param_name]["x-parameter"] = "choice" + schema_to_override[param_name]["enum"] = choices + else: + schema_to_override[param_name]["x-parameter"] = "text" + if param_val.annotation is bool: + schema_to_override[param_name]["x-parameter"] = "bool" + if param_val.annotation in (int, float): + schema_to_override[param_name]["x-parameter"] = ( + "int" if param_val.annotation is int else "float" + ) + # Check for greater 
than or equal to constraint + if any(isinstance(constraint, Ge) for constraint in param_val.metadata): + min_value = next( + constraint.ge + for constraint in param_val.metadata + if isinstance(constraint, Ge) + ) + schema_to_override[param_name]["minimum"] = min_value + # Check for greater than constraint + elif any( + isinstance(constraint, Gt) for constraint in param_val.metadata + ): + min_value = next( + constraint.gt + for constraint in param_val.metadata + if isinstance(constraint, Gt) + ) + schema_to_override[param_name]["exclusiveMinimum"] = min_value + # Check for less than or equal to constraint + if any(isinstance(constraint, Le) for constraint in param_val.metadata): + max_value = next( + constraint.le + for constraint in param_val.metadata + if isinstance(constraint, Le) + ) + schema_to_override[param_name]["maximum"] = max_value + # Check for less than constraint + elif any( + isinstance(constraint, Lt) for constraint in param_val.metadata + ): + max_value = next( + constraint.lt + for constraint in param_val.metadata + if isinstance(constraint, Lt) + ) + schema_to_override[param_name]["exclusiveMaximum"] = max_value + + def override_schema( + self, openapi_schema: dict, func_name: str, endpoint: str, params: dict + ): + """ + Overrides the default openai schema generated by fastapi with additional information about: + - The choices available for each MultipleChoiceParam instance + - The min and max values for each FloatParam instance + - The min and max values for each IntParam instance + - The default value for DictInput instance + - The default value for MessagesParam instance + - The default value for FileInputURL instance + - The default value for BinaryParam instance + - ... 
[PLEASE ADD AT EACH CHANGE] + + Args: + openapi_schema (dict): The openapi schema generated by fastapi + func (str): The name of the function to override + endpoint (str): The name of the endpoint to override + params (dict(param_name, param_val)): The dictionary of the parameters for the function + """ + + def find_in_schema( + schema_type_properties: dict, schema: dict, param_name: str, xparam: str + ): + """Finds a parameter in the schema based on its name and x-parameter value""" + for _, value in schema.items(): + value_title_lower = str(value.get("title")).lower() + value_title = ( + "_".join(value_title_lower.split()) + if len(value_title_lower.split()) >= 2 + else value_title_lower + ) + + if ( + isinstance(value, dict) + and schema_type_properties.get("x-parameter") == xparam + and value_title == param_name + ): + # this will update the default type schema with the properties gotten + # from the schema type (param_val) __schema_properties__ classmethod + for type_key, type_value in schema_type_properties.items(): + # BEFORE: + # value = {'temperature': {'title': 'Temperature'}} + value[type_key] = type_value + # AFTER: + # value = {'temperature': { "type": "number", "title": "Temperature", "x-parameter": "float" }} + return value + + def get_type_from_param(param_val): + param_type = "string" + annotation = param_val.annotation + + if annotation == int: + param_type = "integer" + elif annotation == float: + param_type = "number" + elif annotation == dict: + param_type = "object" + elif annotation == bool: + param_type = "boolean" + elif annotation == list: + param_type = "list" + elif annotation == str: + param_type = "string" + else: + print("ERROR, unhandled annotation:", annotation) + + return param_type + + # Goes from '/some/path' to 'some_path' + endpoint = endpoint[1:].replace("/", "_") + + schema_to_override = openapi_schema["components"]["schemas"][ + f"Body_{func_name}_{endpoint}_post" + ]["properties"] + + for param_name, param_val in 
params.items(): + if isinstance(param_val, GroupedMultipleChoiceParam): + subschema = find_in_schema( + param_val.__schema_type_properties__(), + schema_to_override, + param_name, + "grouped_choice", + ) + assert ( + subschema + ), f"GroupedMultipleChoiceParam '{param_name}' is in the parameters but could not be found in the openapi.json" + subschema["choices"] = param_val.choices # type: ignore + subschema["default"] = param_val.default # type: ignore + + elif isinstance(param_val, MultipleChoiceParam): + subschema = find_in_schema( + param_val.__schema_type_properties__(), + schema_to_override, + param_name, + "choice", + ) + default = str(param_val) + param_choices = param_val.choices # type: ignore + choices = ( + [default] + param_choices + if param_val not in param_choices + else param_choices + ) + subschema["enum"] = choices + subschema["default"] = ( + default if default in param_choices else choices[0] + ) + + elif isinstance(param_val, FloatParam): + subschema = find_in_schema( + param_val.__schema_type_properties__(), + schema_to_override, + param_name, + "float", + ) + subschema["minimum"] = param_val.minval # type: ignore + subschema["maximum"] = param_val.maxval # type: ignore + subschema["default"] = param_val + + elif isinstance(param_val, IntParam): + subschema = find_in_schema( + param_val.__schema_type_properties__(), + schema_to_override, + param_name, + "int", + ) + subschema["minimum"] = param_val.minval # type: ignore + subschema["maximum"] = param_val.maxval # type: ignore + subschema["default"] = param_val + + elif isinstance(param_val, Parameter) and param_val.annotation is DictInput: + subschema = find_in_schema( + param_val.annotation.__schema_type_properties__(), + schema_to_override, + param_name, + "dict", + ) + subschema["default"] = param_val.default["default_keys"] + + elif isinstance(param_val, TextParam): + subschema = find_in_schema( + param_val.__schema_type_properties__(), + schema_to_override, + param_name, + "text", + ) + 
class instrument:  # pylint: disable=invalid-name
    """Decorator that traces a function call as an Agenta/OpenTelemetry span.

    Captures (optionally redacted) inputs and outputs, cost/token metrics,
    and — on root spans — the routing metadata (config, environment, version,
    variant). Works on both sync and async functions.
    """

    # Key under which a non-dict result is stored in the outputs payload.
    DEFAULT_KEY = "__default__"

    def __init__(
        self,
        type: str = "task",  # pylint: disable=redefined-builtin
        config: Optional[Dict[str, Any]] = None,
        ignore_inputs: Optional[bool] = None,
        ignore_outputs: Optional[bool] = None,
        redact: Optional[Callable[..., Any]] = None,
        redact_on_error: Optional[bool] = True,
        max_depth: Optional[int] = 2,
        # DEPRECATING
        kind: str = "task",
        spankind: Optional[str] = "TASK",
    ) -> None:
        # BUG FIX: the previous `spankind or kind or type` always picked the
        # deprecated `spankind` default ("TASK"), so an explicit `type=` was
        # silently ignored. Deprecated aliases now win only when the caller
        # actually overrides them; all-default calls still resolve to "TASK".
        if spankind and spankind != "TASK":
            self.type = spankind
        elif kind and kind != "task":
            self.type = kind
        elif type and type != "task":
            self.type = type
        else:
            self.type = spankind or kind or type

        self.kind = None
        self.config = config
        self.ignore_inputs = ignore_inputs
        self.ignore_outputs = ignore_outputs
        self.redact = redact
        self.redact_on_error = redact_on_error
        self.max_depth = max_depth

    def __call__(self, func: Callable[..., Any]):
        """Wrap `func` so every invocation runs inside its own span."""

        @wraps(func)
        async def async_wrapper(*args, **kwargs):
            self._parse_type_and_kind()

            with ag.tracer.start_as_current_span(func.__name__, kind=self.kind):
                self._pre_instrument(func, *args, **kwargs)

                result = await func(*args, **kwargs)

                self._post_instrument(result)

                return result

        @wraps(func)
        def sync_wrapper(*args, **kwargs):
            self._parse_type_and_kind()

            with ag.tracer.start_as_current_span(func.__name__, kind=self.kind):
                self._pre_instrument(func, *args, **kwargs)

                result = func(*args, **kwargs)

                self._post_instrument(result)

                return result

        # NOTE: the inner `_auto_instrumented` closures were a no-op level of
        # indirection (defined then immediately invoked); they were removed.
        return async_wrapper if iscoroutinefunction(func) else sync_wrapper

    def _parse_type_and_kind(self):
        """Promote this span to a workflow root when no span is active yet."""
        if not ag.tracing.get_current_span().is_recording():
            # No recording ancestor span: this call starts a new trace.
            self.type = "workflow"

        self.kind = parse_span_kind(self.type)

    def _pre_instrument(
        self,
        func,
        *args,
        **kwargs,
    ):
        """Record node type, root-only routing metadata, and redacted inputs."""
        span = ag.tracing.get_current_span()

        with suppress():
            span.set_attributes(
                attributes={"node": self.type},
                namespace="type",
            )

            if span.parent is None:
                # Root span: attach routing context so the trace is
                # self-describing.
                rctx = tracing_context.get()

                span.set_attributes(
                    attributes={"configuration": rctx.get("config", {})},
                    namespace="meta",
                )
                span.set_attributes(
                    attributes={"environment": rctx.get("environment", {})},
                    namespace="meta",
                )
                span.set_attributes(
                    attributes={"version": rctx.get("version", {})},
                    namespace="meta",
                )
                span.set_attributes(
                    attributes={"variant": rctx.get("variant", {})},
                    namespace="meta",
                )

            _inputs = self._redact(
                name=span.name,
                field="inputs",
                io=self._parse(func, *args, **kwargs),
                ignore=self.ignore_inputs,
            )
            span.set_attributes(
                attributes={"inputs": _inputs},
                namespace="data",
                max_depth=self.max_depth,
            )

    def _post_instrument(
        self,
        result,
    ):
        """Record cost/token metrics, redacted outputs, status, root linkage."""
        span = ag.tracing.get_current_span()

        with suppress():
            cost = None
            usage = {}

            if isinstance(result, dict):
                cost = result.get("cost", None)
                usage = result.get("usage", {})

            # Some callees report usage as a bare total-token count.
            if isinstance(usage, (int, float)):
                usage = {"total_tokens": usage}

            span.set_attributes(
                attributes={"total": cost},
                namespace="metrics.unit.costs",
            )
            span.set_attributes(
                attributes={
                    "prompt": usage.get("prompt_tokens", None),
                    "completion": usage.get("completion_tokens", None),
                    "total": usage.get("total_tokens", None),
                },
                namespace="metrics.unit.tokens",
            )

            _outputs = self._redact(
                name=span.name,
                field="outputs",
                io=self._patch(result),
                ignore=self.ignore_outputs,
            )
            span.set_attributes(
                attributes={"outputs": _outputs},
                namespace="data",
                max_depth=self.max_depth,
            )

            span.set_status("OK")

        with suppress():
            if hasattr(span, "parent") and span.parent is None:
                # Surface root trace/span ids so callers can link responses
                # back to this trace.
                tracing_context.set(
                    tracing_context.get()
                    | {
                        "root": {
                            "trace_id": span.get_span_context().trace_id,
                            "span_id": span.get_span_context().span_id,
                        }
                    }
                )

    def _parse(
        self,
        func,
        *args,
        **kwargs,
    ) -> Dict[str, Any]:
        """Map positional and keyword arguments to parameter names."""
        inputs = {
            key: value
            for key, value in chain(
                zip(getfullargspec(func).args, args),
                kwargs.items(),
            )
        }

        return inputs

    def _redact(
        self,
        *,
        name: str,
        field: str,
        io: Dict[str, Any],
        ignore: Union[List[str], bool] = False,
    ) -> Dict[str, Any]:
        """
        Redact user-defined sensitive information
        from inputs and outputs as defined by the ignore list or boolean flag.

        Example:
        - ignore = ["password"] -> {"username": "admin", "password": "********"}
            -> {"username": "admin"}
        - ignore = True -> {"username": "admin", "password": "********"}
            -> {}
        - ignore = False -> {"username": "admin", "password": "********"}
            -> {"username": "admin", "password": "********"}
        """
        io = {
            key: value
            for key, value in io.items()
            if key
            not in (
                ignore
                if isinstance(ignore, list)
                else io.keys()
                if ignore is True
                else []
            )
        }

        # Per-decorator redaction hook; on error, drop everything if
        # redact_on_error is set (fail closed), else keep the filtered io.
        if self.redact is not None:
            try:
                io = self.redact(name, field, io)
            except:  # pylint: disable=bare-except
                if self.redact_on_error:
                    io = {}

        # Global (tracer-level) redaction hook, same fail-closed policy.
        if ag.tracing.redact is not None:
            try:
                io = ag.tracing.redact(name, field, io)
            except:  # pylint: disable=bare-except
                if ag.tracing.redact_on_error:
                    io = {}

        return io

    def _patch(
        self,
        result: Any,
    ) -> Dict[str, Any]:
        """
        Patch the result to ensure that it is a dictionary, with a default key when necessary.

        Example:
        - result = "Hello, World!"
            -> {"__default__": "Hello, World!"}
        - result = {"message": "Hello, World!", "cost": 0.0, "usage": {}}
            -> {"__default__": "Hello, World!"}
        - result = {"message": "Hello, World!"}
            -> {"message": "Hello, World!"}
        """
        outputs = (
            {instrument.DEFAULT_KEY: result}
            if not isinstance(result, dict)
            else (
                {instrument.DEFAULT_KEY: result["message"]}
                if all(key in result for key in ["message", "cost", "usage"])
                else result
            )
        )

        return outputs
def litellm_handler():
    """Build and return a litellm callback handler that mirrors litellm LLM
    calls into Agenta tracing spans.

    Returns:
        An instance of a `CustomLogger` subclass wired to Agenta tracing.

    Raises:
        ImportError: if the optional `litellm` package is not installed.
    """
    try:
        from litellm.integrations.custom_logger import (  # pylint: disable=import-outside-toplevel
            CustomLogger as LitellmCustomLogger,
        )
    except ImportError as exc:
        raise ImportError(
            "The litellm SDK is not installed. Please install it using `pip install litellm`."
        ) from exc
    except Exception as exc:
        raise Exception(  # pylint: disable=broad-exception-raised
            f"Unexpected error occurred when importing litellm: {exc}"
        ) from exc

    class LitellmHandler(LitellmCustomLogger):
        """Instruments litellm LLM calls as Agenta spans.

        One span is opened per `litellm_call_id` in `log_pre_api_call` and
        closed — and dropped from the registry — in the success/failure hooks.
        """

        def __init__(self):
            super().__init__()

            # Open spans keyed by litellm_call_id. BUG FIX: entries are now
            # popped when a span ends, so the dict no longer grows without
            # bound across the process lifetime.
            self.span: Dict[str, CustomSpan] = dict()

        def _peek_span(self, kwargs):
            """Return the span for this call without removing it, or None."""
            litellm_call_id = kwargs.get("litellm_call_id")

            if not litellm_call_id:
                log.warning("Agenta SDK - litellm tracing failed")
                return None

            # BUG FIX: direct indexing raised KeyError when the pre-call hook
            # never created a span for this id.
            span = self.span.get(litellm_call_id)

            if span is None:
                log.warning("Agenta SDK - litellm tracing failed")

            return span

        def _take_span(self, kwargs):
            """Pop and return the span for this call (it is about to end), or None."""
            litellm_call_id = kwargs.get("litellm_call_id")

            if not litellm_call_id:
                log.warning("Agenta SDK - litellm tracing failed")
                return None

            span = self.span.pop(litellm_call_id, None)

            if span is None:
                log.warning("Agenta SDK - litellm tracing failed")

            return span

        def _close_success(self, span, kwargs, response_obj):
            """Record outputs and metrics on `span`, mark it OK, and end it."""
            try:
                completion = [
                    choice.message.__dict__ for choice in response_obj.choices
                ]

                span.set_attributes(
                    attributes={"outputs": {"completion": completion}},
                    namespace="data",
                )
            except Exception as exc:  # pylint: disable=broad-except
                # BUG FIX: this failure used to be silently swallowed
                # (`except Exception as e: pass`); surface it for debugging.
                log.warning("Agenta SDK - failed to record litellm outputs: %s", exc)

            span.set_attributes(
                attributes={"total": kwargs.get("response_cost")},
                namespace="metrics.unit.costs",
            )
            span.set_attributes(
                attributes={
                    "prompt": response_obj.usage.prompt_tokens,
                    "completion": response_obj.usage.completion_tokens,
                    "total": response_obj.usage.total_tokens,
                },
                namespace="metrics.unit.tokens",
            )

            span.set_status(status="OK")
            span.end()

        def _close_failure(self, span, kwargs):
            """Record the raised exception on `span`, mark it ERROR, and end it."""
            span.record_exception(kwargs["exception"])
            span.set_status(status="ERROR")
            span.end()

        def log_pre_api_call(
            self,
            model,
            messages,
            kwargs,
        ):
            """Open a span for this litellm call and record inputs/configuration."""
            litellm_call_id = kwargs.get("litellm_call_id")

            if not litellm_call_id:
                log.warning("Agenta SDK - litellm tracing failed")
                return

            type = (  # pylint: disable=redefined-builtin
                "chat"
                if kwargs.get("call_type") in ["completion", "acompletion"]
                else "embedding"
            )

            kind = SpanKind.CLIENT

            span = CustomSpan(
                ag.tracer.start_span(name=f"litellm_{kind.name.lower()}", kind=kind)
            )
            self.span[litellm_call_id] = span

            if not span.is_recording():
                log.error("Agenta SDK - litellm span not recording.")
                return

            span.set_attributes(
                attributes={"node": type},
                namespace="type",
            )

            span.set_attributes(
                attributes={"inputs": {"prompt": kwargs["messages"]}},
                namespace="data",
            )

            span.set_attributes(
                attributes={
                    "configuration": {
                        "model": kwargs.get("model"),
                        **kwargs.get("optional_params"),
                    }
                },
                namespace="meta",
            )

        def log_stream_event(
            self,
            kwargs,
            response_obj,
            start_time,
            end_time,
        ):
            """Streaming chunks are not recorded; just sanity-check the span."""
            self._peek_span(kwargs)

        async def async_log_stream_event(
            self,
            kwargs,
            response_obj,
            start_time,
            end_time,
        ):
            """Async counterpart of `log_stream_event`."""
            if kwargs.get("stream"):
                return

            self._peek_span(kwargs)

        def log_success_event(
            self,
            kwargs,
            response_obj,
            start_time,
            end_time,
        ):
            """Close the span with outputs and metrics on success."""
            if kwargs.get("stream"):
                # Streaming completions are finalized via the stream hooks.
                return

            span = self._take_span(kwargs)

            if span is None or not span.is_recording():
                return

            self._close_success(span, kwargs, response_obj)

        async def async_log_success_event(
            self,
            kwargs,
            response_obj,
            start_time,
            end_time,
        ):
            """Async counterpart of `log_success_event`."""
            span = self._take_span(kwargs)

            if span is None or not span.is_recording():
                return

            self._close_success(span, kwargs, response_obj)

        def log_failure_event(
            self,
            kwargs,
            response_obj,
            start_time,
            end_time,
        ):
            """Close the span with the raised exception on failure."""
            span = self._take_span(kwargs)

            if span is None or not span.is_recording():
                return

            self._close_failure(span, kwargs)

        async def async_log_failure_event(
            self,
            kwargs,
            response_obj,
            start_time,
            end_time,
        ):
            """Async counterpart of `log_failure_event`."""
            span = self._take_span(kwargs)

            if span is None or not span.is_recording():
                return

            self._close_failure(span, kwargs)

    return LitellmHandler()
class ConfigManager:
    """Resolves application configuration from the route context, the
    configuration registry (backend), or local YAML/JSON files."""

    @staticmethod
    def _parse_refs_from_context(context: Dict[str, Any]) -> Dict[str, Any]:
        """Extract application/variant/environment reference fields from a
        routing context dict, defaulting every field to None.

        Shared by `get_from_route` and `aget_from_route`, which previously
        duplicated this logic inline.
        """
        refs: Dict[str, Any] = {
            "app_id": None,
            "app_slug": None,
            "variant_id": None,
            "variant_slug": None,
            "variant_version": None,
            "environment_id": None,
            "environment_slug": None,
            "environment_version": None,
        }

        if "application" in context:
            refs["app_id"] = context["application"].get("id")
            refs["app_slug"] = context["application"].get("slug")

        if "variant" in context:
            refs["variant_id"] = context["variant"].get("id")
            refs["variant_slug"] = context["variant"].get("slug")
            refs["variant_version"] = context["variant"].get("version")

        if "environment" in context:
            refs["environment_id"] = context["environment"].get("id")
            refs["environment_slug"] = context["environment"].get("slug")
            refs["environment_version"] = context["environment"].get("version")

        return refs

    @staticmethod
    def get_from_route(
        schema: Optional[Type[T]] = None,
    ) -> Union[Dict[str, Any], T]:
        """
        Retrieves the configuration from the route context and returns a config object.

        This method checks the route context for configuration information and returns
        an instance of the specified schema based on the available context data.

        Args:
            schema (Type[T]): A Pydantic model class that defines the structure of the configuration.

        Returns:
            T: An instance of the specified schema populated with the configuration data.

        Raises:
            ValueError: If conflicting configuration sources are provided or if no valid
                        configuration source is found in the context.

        Note:
            The method prioritizes the inputs in the following way:
            1. 'config' (i.e. when called explicitly from the playground)
            2. 'environment'
            3. 'variant'
            Only one of these should be provided.
        """
        context = routing_context.get()

        if "config" in context and context["config"]:
            # Explicit config (e.g. from the playground) takes precedence.
            parameters = context["config"]
        else:
            parameters = ConfigManager.get_from_registry(
                **ConfigManager._parse_refs_from_context(context)
            )

        if schema:
            return schema(**parameters)

        return parameters

    @staticmethod
    async def aget_from_route(
        schema: Optional[Type[T]] = None,
    ) -> Union[Dict[str, Any], T]:
        """
        Asynchronously retrieves the configuration from the route context and returns a config object.

        See `get_from_route` for the full contract; this variant awaits the
        registry fetch instead of blocking.
        """
        context = routing_context.get()

        if "config" in context and context["config"]:
            parameters = context["config"]
        else:
            # BUG FIX: this previously called
            # `ConfigManager.async_get_from_registry(...)`, which does not
            # exist (AttributeError at runtime) — the method is named
            # `aget_from_registry`.
            parameters = await ConfigManager.aget_from_registry(
                **ConfigManager._parse_refs_from_context(context)
            )

        if schema:
            return schema(**parameters)

        return parameters

    @staticmethod
    def get_from_registry(
        schema: Optional[Type[T]] = None,
        #
        app_id: Optional[str] = None,
        app_slug: Optional[str] = None,
        variant_id: Optional[str] = None,
        variant_slug: Optional[str] = None,
        variant_version: Optional[int] = None,
        environment_id: Optional[str] = None,
        environment_slug: Optional[str] = None,
        environment_version: Optional[int] = None,
    ) -> Union[Dict[str, Any], T]:
        """
        Pulls the parameters for the app variant from the server and returns a config object.

        This method retrieves the configuration from the backend server based on the provided
        environment or variant. It then validates and returns the configuration as an instance
        of the specified schema.

        Args:
            app_slug (str): The unique identifier for the application whose configuration is to be fetched.
            variant_slug (Optional[str]): The variant name to fetch the configuration for. Defaults to None.
            variant_version (Optional[int]): The version number of the variant to fetch. Defaults to None.
            environment_slug (Optional[str]): The environment name to fetch the configuration for.

        Raises:
            Exception: For any other errors during the process (e.g., API communication issues).
        """
        config = SharedManager.fetch(
            app_id=app_id,
            app_slug=app_slug,
            variant_id=variant_id,
            variant_slug=variant_slug,
            variant_version=variant_version,
            environment_id=environment_id,
            environment_slug=environment_slug,
            environment_version=environment_version,
        )

        if schema:
            return schema(**config.params)

        return config.params

    @staticmethod
    async def aget_from_registry(
        schema: Optional[Type[T]] = None,
        #
        app_id: Optional[str] = None,
        app_slug: Optional[str] = None,
        variant_id: Optional[str] = None,
        variant_slug: Optional[str] = None,
        variant_version: Optional[int] = None,
        environment_id: Optional[str] = None,
        environment_slug: Optional[str] = None,
        environment_version: Optional[int] = None,
    ) -> Union[Dict[str, Any], T]:
        """
        Asynchronously pulls the parameters for the app variant from the server
        and returns a config object. See `get_from_registry`.
        """
        config = await SharedManager.afetch(
            app_id=app_id,
            app_slug=app_slug,
            variant_id=variant_id,
            variant_slug=variant_slug,
            variant_version=variant_version,
            environment_id=environment_id,
            environment_slug=environment_slug,
            environment_version=environment_version,
        )

        if schema:
            return schema(**config.params)

        return config.params

    @staticmethod
    def get_from_yaml(
        filename: str,
        schema: Optional[Type[T]] = None,
    ) -> Union[Dict[str, Any], T]:
        """
        Loads configuration from a YAML file and returns a config object.

        Args:
            filename (str): The name of the YAML file to load.
            schema (Type[T]): A Pydantic model class that defines the structure of the configuration.

        Returns:
            The raw parameters dict, or an instance of `schema` when provided.
            (FIX: annotation was `-> T` even though a plain dict is returned
            when `schema` is None.)

        Raises:
            FileNotFoundError: If the specified file doesn't exist.
            ValidationError: If the loaded configuration data doesn't match the schema.
        """
        file_path = Path(filename)

        with open(file_path, "r", encoding="utf-8") as file:
            parameters = yaml.safe_load(file)

        if schema:
            return schema(**parameters)

        return parameters

    @staticmethod
    def get_from_json(
        filename: str,
        schema: Optional[Type[T]] = None,
    ) -> Union[Dict[str, Any], T]:
        """
        Loads configuration from a JSON file and returns a config object.

        Args:
            filename (str): The name of the JSON file to load.
            schema (Type[T]): A Pydantic model class that defines the structure of the configuration.

        Returns:
            The raw parameters dict, or an instance of `schema` when provided.

        Raises:
            FileNotFoundError: If the specified file doesn't exist.
            ValidationError: If the loaded configuration data doesn't match the schema.
        """
        file_path = Path(filename)

        with open(file_path, "r", encoding="utf-8") as file:
            parameters = json.load(file)

        if schema:
            return schema(**parameters)

        return parameters
+ + Args: + filename (str): The name of the JSON file to load. + schema (Type[T]): A Pydantic model class that defines the structure of the configuration. + + Returns: + T: An instance of the specified schema populated with the configuration data. + + Raises: + FileNotFoundError: If the specified file doesn't exist. + ValidationError: If the loaded configuration data doesn't match the schema. + """ + file_path = Path(filename) + + with open(file_path, "r", encoding="utf-8") as file: + parameters = json.load(file) + + if schema: + return schema(**parameters) + + return parameters diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/managers/deployment.py b/agenta-cli/debugging/simple-app/agenta/sdk/managers/deployment.py new file mode 100644 index 0000000000..458170c853 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/sdk/managers/deployment.py @@ -0,0 +1,45 @@ +from typing import Optional + +from agenta.sdk.managers.shared import SharedManager + + +class DeploymentManager: + @classmethod + def deploy( + cls, + *, + variant_slug: str, + environment_slug: str, + # + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + variant_version: Optional[int] = None, + ): + deployment = SharedManager.deploy( + app_id=app_id, + app_slug=app_slug, + variant_slug=variant_slug, + variant_version=variant_version, + environment_slug=environment_slug, + ) + return deployment + + @classmethod + async def adeploy( + cls, + *, + variant_slug: str, + environment_slug: str, + # + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + variant_version: Optional[int] = None, + ): + deployment = await SharedManager.adeploy( + app_id=app_id, + app_slug=app_slug, + variant_slug=variant_slug, + variant_version=variant_version, + environment_slug=environment_slug, + ) + return deployment diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/managers/shared.py b/agenta-cli/debugging/simple-app/agenta/sdk/managers/shared.py new file mode 100644 index 
0000000000..40e2fbd7b2 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/sdk/managers/shared.py @@ -0,0 +1,639 @@ +import logging +from typing import Optional, Dict, Any + +from agenta.sdk.utils.exceptions import handle_exceptions + +from agenta.sdk.types import ( + ConfigurationResponse, + DeploymentResponse, +) +from agenta.client.backend.types.config_dto import ConfigDto as ConfigRequest +from agenta.client.backend.types.config_response_model import ConfigResponseModel +from agenta.client.backend.types.reference_request_model import ReferenceRequestModel + +import agenta as ag + + +logger = logging.getLogger(__name__) + + +class SharedManager: + """ + SharedManager is a utility class that serves as an interface for managing + application configurations, variants, and deployments through the Agenta API. + It provides both synchronous and asynchronous methods, allowing flexibility + depending on the context of use (e.g., blocking or non-blocking environments). + + Attributes: + client (AgentaApi): Synchronous client for interacting with the Agenta API. + aclient (AsyncAgentaApi): Asynchronous client for interacting with the Agenta API. + + Notes: + - The class manages both synchronous and asynchronous interactions with the API, allowing users to + select the method that best fits their needs. + - Methods prefixed with 'a' (e.g., aadd, afetch) are designed to be used in asynchronous environments. 
+ """ + + @classmethod + def _parse_fetch_request( + cls, + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + variant_id: Optional[str] = None, + variant_slug: Optional[str] = None, + variant_version: Optional[int] = None, + environment_id: Optional[str] = None, + environment_slug: Optional[str] = None, + environment_version: Optional[int] = None, + ): + if variant_slug and not (app_id or app_slug): + raise ValueError("`variant_slug` requires `app_id` or `app_slug`") + if variant_version and not variant_slug: + raise ValueError("`variant_version` requires `variant_slug`") + if environment_slug and not (app_id or app_slug): + raise ValueError("`environment_slug` requires `app_id` or `app_slug`") + if environment_version and not environment_slug: + raise ValueError("`environment_version` requires `environment_slug`") + + return { + "app_id": app_id, + "app_slug": app_slug, + "variant_id": variant_id, + "variant_slug": variant_slug, + "variant_version": variant_version, + "environment_id": environment_id, + "environment_slug": environment_slug, + "environment_version": environment_version, + } + + @classmethod + def _parse_config_response( + cls, + model: ConfigResponseModel, + ) -> Dict[str, Any]: + flattened: Dict[str, Any] = {} + + # Process application_ref + if model.application_ref: + flattened["app_id"] = model.application_ref.id + flattened["app_slug"] = model.application_ref.slug + + # Process variant_ref + if model.variant_ref: + flattened["variant_id"] = model.variant_ref.id + flattened["variant_slug"] = model.variant_ref.slug + flattened["variant_version"] = model.variant_ref.version + + # Process environment_ref + if model.environment_ref: + flattened["environment_id"] = model.environment_ref.id + flattened["environment_slug"] = model.environment_ref.slug + flattened["environment_version"] = model.environment_ref.version + + # Process variant_lifecycle + if model.variant_lifecycle: + flattened["committed_at"] = 
model.variant_lifecycle.updated_at + flattened["committed_by"] = model.variant_lifecycle.updated_by + flattened["committed_by_id"] = model.variant_lifecycle.updated_by_id + + # Process environment_lifecycle + if model.environment_lifecycle: + flattened["deployed_at"] = model.environment_lifecycle.created_at + flattened["deployed_by"] = model.environment_lifecycle.updated_by + flattened["deployed_by_id"] = model.environment_lifecycle.updated_by_id + + # Add parameters + flattened["params"] = model.params or {} + + return flattened + + @classmethod + def _ref_or_none( + cls, + *, + id: Optional[str] = None, + slug: Optional[str] = None, + version: Optional[int] = None, + ) -> Optional[ReferenceRequestModel]: + if not id and not slug and not version: + return None + + return ReferenceRequestModel(id=id, slug=slug, version=version) + + @classmethod + @handle_exceptions() + def add( + cls, + *, + variant_slug: str, + # + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + ): + config_response = ag.api.variants.configs_add( # type: ignore + variant_ref=SharedManager._ref_or_none( # type: ignore + slug=variant_slug, + version=None, + id=None, + ), + application_ref=SharedManager._ref_or_none( # type: ignore + slug=app_slug, + version=None, + id=app_id, + ), + ) + response = SharedManager._parse_config_response(config_response) + return ConfigurationResponse(**response) + + @classmethod + @handle_exceptions() + async def aadd( + cls, + *, + variant_slug: str, + # + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + ): + config_response = await ag.async_api.variants.configs_add( # type: ignore + variant_ref=SharedManager._ref_or_none( # type: ignore + slug=variant_slug, + version=None, + id=None, + ), + application_ref=SharedManager._ref_or_none( # type: ignore + slug=app_slug, + version=None, + id=app_id, + ), + ) + response = SharedManager._parse_config_response(config_response) + + return ConfigurationResponse(**response) + + @classmethod + 
# NOTE(review): these are classmethods of `SharedManager` (the class header
# lies outside this chunk); reconstructed from a diff-mangled source.

@classmethod
def _request_refs(
    cls,
    signatures: Dict[str, Any],
):
    """Build (variant_ref, environment_ref, application_ref) request models
    from the dict produced by `_parse_fetch_request`.

    Extracted to remove the duplication between `fetch` and `afetch`.
    """
    return (
        SharedManager._ref_or_none(
            id=signatures["variant_id"],
            slug=signatures["variant_slug"],
            version=signatures["variant_version"],
        ),
        SharedManager._ref_or_none(
            id=signatures["environment_id"],
            slug=signatures["environment_slug"],
            version=signatures["environment_version"],
        ),
        SharedManager._ref_or_none(
            id=signatures["app_id"],
            slug=signatures["app_slug"],
            version=None,
        ),
    )

@classmethod
@handle_exceptions()
def fetch(
    cls,
    *,
    app_id: Optional[str] = None,
    app_slug: Optional[str] = None,
    variant_id: Optional[str] = None,
    variant_slug: Optional[str] = None,
    variant_version: Optional[int] = None,
    environment_id: Optional[str] = None,
    environment_slug: Optional[str] = None,
    environment_version: Optional[int] = None,
) -> ConfigurationResponse:
    """Fetch a configuration by variant and/or environment reference (sync).

    Raises:
        ValueError: when the reference combination is invalid
            (see `_parse_fetch_request`).
    """
    signatures = SharedManager._parse_fetch_request(
        app_id=app_id,
        app_slug=app_slug,
        variant_id=variant_id,
        variant_slug=variant_slug,
        variant_version=variant_version,
        environment_id=environment_id,
        environment_slug=environment_slug,
        environment_version=environment_version,
    )
    variant_ref, environment_ref, application_ref = SharedManager._request_refs(
        signatures
    )

    config_response = ag.api.variants.configs_fetch(  # type: ignore
        variant_ref=variant_ref,
        environment_ref=environment_ref,
        application_ref=application_ref,
    )

    return ConfigurationResponse(
        **SharedManager._parse_config_response(config_response)
    )

@classmethod
@handle_exceptions()
async def afetch(
    cls,
    *,
    app_id: Optional[str] = None,
    app_slug: Optional[str] = None,
    variant_id: Optional[str] = None,
    variant_slug: Optional[str] = None,
    variant_version: Optional[int] = None,
    environment_id: Optional[str] = None,
    environment_slug: Optional[str] = None,
    environment_version: Optional[int] = None,
) -> ConfigurationResponse:
    """Fetch a configuration by variant and/or environment reference (async)."""
    signatures = SharedManager._parse_fetch_request(
        app_id=app_id,
        app_slug=app_slug,
        variant_id=variant_id,
        variant_slug=variant_slug,
        variant_version=variant_version,
        environment_id=environment_id,
        environment_slug=environment_slug,
        environment_version=environment_version,
    )
    variant_ref, environment_ref, application_ref = SharedManager._request_refs(
        signatures
    )

    config_response = await ag.async_api.variants.configs_fetch(  # type: ignore
        variant_ref=variant_ref,
        environment_ref=environment_ref,
        application_ref=application_ref,
    )

    return ConfigurationResponse(
        **SharedManager._parse_config_response(config_response)
    )

@classmethod
@handle_exceptions()
def list(
    cls,
    *,
    app_id: Optional[str] = None,
    app_slug: Optional[str] = None,
) -> List[ConfigurationResponse]:
    """List every variant configuration of an application (sync)."""
    configs_response = ag.api.variants.configs_list(  # type: ignore
        application_ref=SharedManager._ref_or_none(
            id=app_id,
            slug=app_slug,
            version=None,
        ),
    )

    return [
        ConfigurationResponse(**SharedManager._parse_config_response(config))
        for config in configs_response
    ]

@classmethod
@handle_exceptions()
async def alist(
    cls,
    *,
    app_id: Optional[str] = None,
    app_slug: Optional[str] = None,
) -> List[ConfigurationResponse]:
    """List every variant configuration of an application (async)."""
    configs_response = await ag.async_api.variants.configs_list(  # type: ignore
        application_ref=SharedManager._ref_or_none(
            id=app_id,
            slug=app_slug,
            version=None,
        ),
    )

    return [
        ConfigurationResponse(**SharedManager._parse_config_response(config))
        for config in configs_response
    ]

@classmethod
@handle_exceptions()
def history(
    cls,
    *,
    app_id: Optional[str] = None,
    app_slug: Optional[str] = None,
    variant_id: Optional[str] = None,
    variant_slug: Optional[str] = None,
) -> List[ConfigurationResponse]:
    """List every committed version of a variant configuration (sync)."""
    configs_response = ag.api.variants.configs_history(  # type: ignore
        variant_ref=SharedManager._ref_or_none(
            id=variant_id,
            slug=variant_slug,
            version=None,
        ),
        application_ref=SharedManager._ref_or_none(
            id=app_id,
            slug=app_slug,
            version=None,
        ),
    )

    return [
        ConfigurationResponse(**SharedManager._parse_config_response(config))
        for config in configs_response
    ]

@classmethod
@handle_exceptions()
async def ahistory(
    cls,
    *,
    app_id: Optional[str] = None,
    app_slug: Optional[str] = None,
    variant_id: Optional[str] = None,
    variant_slug: Optional[str] = None,
) -> List[ConfigurationResponse]:
    """List every committed version of a variant configuration (async)."""
    configs_response = await ag.async_api.variants.configs_history(  # type: ignore
        variant_ref=SharedManager._ref_or_none(
            id=variant_id,
            slug=variant_slug,
            version=None,
        ),
        application_ref=SharedManager._ref_or_none(
            id=app_id,
            slug=app_slug,
            version=None,
        ),
    )

    return [
        ConfigurationResponse(**SharedManager._parse_config_response(config))
        for config in configs_response
    ]

@classmethod
@handle_exceptions()
def fork(
    cls,
    *,
    app_id: Optional[str] = None,
    app_slug: Optional[str] = None,
    variant_id: Optional[str] = None,
    variant_slug: Optional[str] = None,
    variant_version: Optional[int] = None,
    environment_id: Optional[str] = None,
    environment_slug: Optional[str] = None,
    environment_version: Optional[int] = None,
) -> ConfigurationResponse:
    """Fork a configuration from a variant or environment reference (sync)."""
    config_response = ag.api.variants.configs_fork(  # type: ignore
        variant_ref=SharedManager._ref_or_none(
            id=variant_id,
            slug=variant_slug,
            version=variant_version,
        ),
        environment_ref=SharedManager._ref_or_none(
            id=environment_id,
            slug=environment_slug,
            version=environment_version,
        ),
        application_ref=SharedManager._ref_or_none(
            id=app_id,
            slug=app_slug,
            version=None,
        ),
    )

    return ConfigurationResponse(
        **SharedManager._parse_config_response(config_response)
    )

@classmethod
@handle_exceptions()
async def afork(
    cls,
    *,
    app_id: Optional[str] = None,
    app_slug: Optional[str] = None,
    variant_id: Optional[str] = None,
    variant_slug: Optional[str] = None,
    variant_version: Optional[int] = None,
    environment_id: Optional[str] = None,
    environment_slug: Optional[str] = None,
    environment_version: Optional[int] = None,
) -> ConfigurationResponse:
    """Fork a configuration from a variant or environment reference (async)."""
    config_response = await ag.async_api.variants.configs_fork(  # type: ignore
        variant_ref=SharedManager._ref_or_none(
            id=variant_id,
            slug=variant_slug,
            version=variant_version,
        ),
        environment_ref=SharedManager._ref_or_none(
            id=environment_id,
            slug=environment_slug,
            version=environment_version,
        ),
        application_ref=SharedManager._ref_or_none(
            id=app_id,
            slug=app_slug,
            version=None,
        ),
    )

    return ConfigurationResponse(
        **SharedManager._parse_config_response(config_response)
    )

@classmethod
@handle_exceptions()
def commit(
    cls,
    *,
    parameters: dict,
    variant_slug: str,
    #
    app_id: Optional[str] = None,
    app_slug: Optional[str] = None,
) -> ConfigurationResponse:
    """Commit new parameters to a variant configuration (sync)."""
    variant_ref = SharedManager._ref_or_none(
        id=None,
        slug=variant_slug,
        version=None,
    )
    application_ref = SharedManager._ref_or_none(
        id=app_id,
        slug=app_slug,
        version=None,
    )

    config_response = ag.api.variants.configs_commit(  # type: ignore
        config=ConfigRequest(
            params=parameters,
            variant_ref=variant_ref.model_dump() if variant_ref else None,
            application_ref=application_ref.model_dump() if application_ref else None,
        )
    )

    return ConfigurationResponse(
        **SharedManager._parse_config_response(config_response)
    )

@classmethod
@handle_exceptions()
async def acommit(
    cls,
    *,
    parameters: dict,
    variant_slug: str,
    #
    app_id: Optional[str] = None,
    app_slug: Optional[str] = None,
) -> ConfigurationResponse:
    """Commit new parameters to a variant configuration (async)."""
    variant_ref = SharedManager._ref_or_none(
        id=None,
        slug=variant_slug,
        version=None,
    )
    application_ref = SharedManager._ref_or_none(
        id=app_id,
        slug=app_slug,
        version=None,
    )

    # FIX(consistency): the sync `commit` passes `.model_dump()` dicts into
    # ConfigRequest while this method passed model instances; both now use
    # the same dumped form.
    config_response = await ag.async_api.variants.configs_commit(  # type: ignore
        config=ConfigRequest(
            params=parameters,
            variant_ref=variant_ref.model_dump() if variant_ref else None,
            application_ref=application_ref.model_dump() if application_ref else None,
        )
    )

    return ConfigurationResponse(
        **SharedManager._parse_config_response(config_response)
    )

@classmethod
@handle_exceptions()
def deploy(
    cls,
    *,
    variant_slug: str,
    environment_slug: str,
    #
    app_id: Optional[str] = None,
    app_slug: Optional[str] = None,
    variant_version: Optional[int] = None,
) -> DeploymentResponse:
    """Deploy a variant (optionally a specific version) to an environment (sync)."""
    config_response = ag.api.variants.configs_deploy(  # type: ignore
        variant_ref=SharedManager._ref_or_none(
            id=None,
            slug=variant_slug,
            version=variant_version,
        ),
        environment_ref=SharedManager._ref_or_none(
            id=None,
            slug=environment_slug,
            version=None,
        ),
        application_ref=SharedManager._ref_or_none(
            id=app_id,
            slug=app_slug,
            version=None,
        ),
    )

    return DeploymentResponse(
        **SharedManager._parse_config_response(config_response)
    )

@classmethod
@handle_exceptions()
async def adeploy(
    cls,
    *,
    variant_slug: str,
    environment_slug: str,
    #
    app_id: Optional[str] = None,
    app_slug: Optional[str] = None,
    variant_version: Optional[int] = None,
) -> DeploymentResponse:
    """Deploy a variant (optionally a specific version) to an environment (async)."""
    config_response = await ag.async_api.variants.configs_deploy(  # type: ignore
        variant_ref=SharedManager._ref_or_none(
            id=None,
            slug=variant_slug,
            version=variant_version,
        ),
        environment_ref=SharedManager._ref_or_none(
            id=None,
            slug=environment_slug,
            version=None,
        ),
        application_ref=SharedManager._ref_or_none(
            id=app_id,
            slug=app_slug,
            version=None,
        ),
    )

    return DeploymentResponse(
        **SharedManager._parse_config_response(config_response)
    )

@classmethod
@handle_exceptions()
def delete(
    cls,
    *,
    app_id: Optional[str] = None,
    app_slug: Optional[str] = None,
    variant_id: Optional[str] = None,
    variant_slug: Optional[str] = None,
    variant_version: Optional[int] = None,
):
    """Delete a variant configuration; returns the raw API response (sync)."""
    return ag.api.variants.configs_delete(  # type: ignore
        variant_ref=SharedManager._ref_or_none(
            id=variant_id,
            slug=variant_slug,
            version=variant_version,
        ),
        application_ref=SharedManager._ref_or_none(
            id=app_id,
            slug=app_slug,
            version=None,
        ),
    )

@classmethod
@handle_exceptions()
async def adelete(
    cls,
    *,
    app_id: Optional[str] = None,
    app_slug: Optional[str] = None,
    variant_id: Optional[str] = None,
    variant_slug: Optional[str] = None,
    variant_version: Optional[int] = None,
):
    """Delete a variant configuration; returns the raw API response (async)."""
    return await ag.async_api.variants.configs_delete(  # type: ignore
        variant_ref=SharedManager._ref_or_none(
            id=variant_id,
            slug=variant_slug,
            version=variant_version,
        ),
        application_ref=SharedManager._ref_or_none(
            id=app_id,
            slug=app_slug,
            version=None,
        ),
    )
# --- agenta/sdk/managers/variant.py (new file in the diff) ---
from typing import Optional

from agenta.sdk.managers.shared import SharedManager


class VariantManager(SharedManager):
    """Thin convenience wrappers around SharedManager for variant workflows.

    Every method delegates straight to the matching SharedManager call;
    `create`/`acreate` additionally commit the initial parameters.
    """

    @classmethod
    def create(
        cls,
        *,
        parameters: dict,
        variant_slug: str,
        #
        app_id: Optional[str] = None,
        app_slug: Optional[str] = None,
    ):
        """Register a variant slug, then commit its initial parameters (sync)."""
        variant = SharedManager.add(
            app_id=app_id,
            app_slug=app_slug,
            variant_slug=variant_slug,
        )

        # Only commit when the add step actually produced a variant.
        if variant:
            variant = SharedManager.commit(
                parameters=parameters,
                app_id=app_id,
                app_slug=app_slug,
                variant_slug=variant_slug,
            )

        return variant

    @classmethod
    async def acreate(
        cls,
        *,
        parameters: dict,
        variant_slug: str,
        #
        app_id: Optional[str] = None,
        app_slug: Optional[str] = None,
    ):
        """Register a variant slug, then commit its initial parameters (async)."""
        variant = await SharedManager.aadd(
            app_id=app_id,
            app_slug=app_slug,
            variant_slug=variant_slug,
        )

        if variant:
            variant = await SharedManager.acommit(
                parameters=parameters,
                app_id=app_id,
                app_slug=app_slug,
                variant_slug=variant_slug,
            )

        return variant

    @classmethod
    def commit(
        cls,
        *,
        parameters: dict,
        variant_slug: str,
        #
        app_id: Optional[str] = None,
        app_slug: Optional[str] = None,
    ):
        """Commit new parameters to an existing variant (sync)."""
        return SharedManager.commit(
            parameters=parameters,
            app_id=app_id,
            app_slug=app_slug,
            variant_slug=variant_slug,
        )

    @classmethod
    async def acommit(
        cls,
        *,
        parameters: dict,
        variant_slug: str,
        #
        app_id: Optional[str] = None,
        app_slug: Optional[str] = None,
    ):
        """Commit new parameters to an existing variant (async)."""
        return await SharedManager.acommit(
            parameters=parameters,
            app_id=app_id,
            app_slug=app_slug,
            variant_slug=variant_slug,
        )

    @classmethod
    def delete(
        cls,
        *,
        variant_slug: str,
        #
        app_id: Optional[str] = None,
        app_slug: Optional[str] = None,
    ):
        """Delete a variant; returns the API's status message (sync)."""
        return SharedManager.delete(
            app_id=app_id,
            app_slug=app_slug,
            variant_slug=variant_slug,
        )

    @classmethod
    async def adelete(
        cls,
        *,
        variant_slug: str,
        #
        app_id: Optional[str] = None,
        app_slug: Optional[str] = None,
    ):
        """Delete a variant; returns the API's status message (async)."""
        return await SharedManager.adelete(
            app_id=app_id,
            app_slug=app_slug,
            variant_slug=variant_slug,
        )

    @classmethod
    def list(
        cls,
        *,
        app_id: Optional[str] = None,
        app_slug: Optional[str] = None,
    ):
        """List all variants of an application (sync)."""
        return SharedManager.list(
            app_id=app_id,
            app_slug=app_slug,
        )

    @classmethod
    async def alist(
        cls,
        *,
        app_id: Optional[str] = None,
        app_slug: Optional[str] = None,
    ):
        """List all variants of an application (async)."""
        return await SharedManager.alist(
            app_id=app_id,
            app_slug=app_slug,
        )

    @classmethod
    def history(
        cls,
        *,
        variant_slug: str,
        #
        app_id: Optional[str] = None,
        app_slug: Optional[str] = None,
    ):
        """List all committed versions of a variant (sync)."""
        return SharedManager.history(
            app_id=app_id,
            app_slug=app_slug,
            variant_slug=variant_slug,
        )

    @classmethod
    async def ahistory(
        cls,
        *,
        variant_slug: str,
        #
        app_id: Optional[str] = None,
        app_slug: Optional[str] = None,
    ):
        """List all committed versions of a variant (async)."""
        return await SharedManager.ahistory(
            app_id=app_id,
            app_slug=app_slug,
            variant_slug=variant_slug,
        )
# --- agenta/sdk/middleware/auth.py (module body) ---
from agenta.sdk.utils.logging import log
from agenta.sdk.middleware.cache import TTLLRUCache

# FIX: env-derived values are strings; cast to int so the cache's
# `len(...) >= capacity` and `time() + ttl` arithmetic work when the
# variables are actually set.
AGENTA_SDK_AUTH_CACHE_CAPACITY = int(
    environ.get(
        "AGENTA_SDK_AUTH_CACHE_CAPACITY",
        512,
    )
)

AGENTA_SDK_AUTH_CACHE_TTL = int(
    environ.get(
        "AGENTA_SDK_AUTH_CACHE_TTL",
        15 * 60,  # 15 minutes
    )
)

# FIX: removed the unconditional `AGENTA_SDK_AUTH_CACHE = False` that
# immediately clobbered this env-derived setting (debug leftover).
AGENTA_SDK_AUTH_CACHE = str(environ.get("AGENTA_SDK_AUTH_CACHE", True)).lower() in (
    "true",
    "1",
    "t",
)

AGENTA_UNAUTHORIZED_EXECUTION_ALLOWED = str(
    environ.get("AGENTA_UNAUTHORIZED_EXECUTION_ALLOWED", True)
).lower() in ("true", "1", "t")


class Deny(Response):
    """401 response returned whenever authorization cannot be established."""

    def __init__(self) -> None:
        super().__init__(status_code=401, content="Unauthorized")


# Process-wide cache of authorization decisions, keyed by a hash of the
# request's credentials + query parameters.
cache = TTLLRUCache(
    capacity=AGENTA_SDK_AUTH_CACHE_CAPACITY,
    ttl=AGENTA_SDK_AUTH_CACHE_TTL,
)


class AuthorizationMiddleware(BaseHTTPMiddleware):
    """Verify each request against the Agenta permissions endpoint.

    On "allow" the verified credentials are attached to
    `request.state.credentials`; any failure or exception yields a 401.
    """

    def __init__(
        self,
        app: FastAPI,
        host: str,
        resource_id: UUID,
        resource_type: str,
    ):
        super().__init__(app)

        self.host = host
        self.resource_id = resource_id
        self.resource_type = resource_type

    async def dispatch(
        self,
        request: Request,
        call_next: Callable,
    ):
        # Escape hatch for local development / self-hosted setups.
        if AGENTA_UNAUTHORIZED_EXECUTION_ALLOWED:
            return await call_next(request)

        try:
            authorization = (
                request.headers.get("Authorization")
                or request.headers.get("authorization")
                or None
            )

            headers = {"Authorization": authorization} if authorization else None

            cookies = {"sAccessToken": request.cookies.get("sAccessToken")}

            params = {
                "action": "run_service",
                "resource_type": self.resource_type,
                "resource_id": self.resource_id,
            }

            project_id = request.query_params.get("project_id")

            if project_id:
                params["project_id"] = project_id

            # Cache key: every input that can change the decision.
            _hash = dumps(
                {
                    "headers": headers,
                    "cookies": cookies,
                    "params": params,
                },
                sort_keys=True,
            )

            policy = None
            if AGENTA_SDK_AUTH_CACHE:
                policy = cache.get(_hash)

            if not policy:
                async with httpx.AsyncClient() as client:
                    response = await client.get(
                        f"{self.host}/api/permissions/verify",
                        headers=headers,
                        cookies=cookies,
                        params=params,
                    )

                    if response.status_code != 200:
                        cache.put(_hash, {"effect": "deny"})
                        return Deny()

                    auth = response.json()

                    if auth.get("effect") != "allow":
                        cache.put(_hash, {"effect": "deny"})
                        return Deny()

                    policy = {
                        "effect": "allow",
                        "credentials": auth.get("credentials"),
                    }

                    cache.put(_hash, policy)

            if not policy or policy.get("effect") == "deny":
                return Deny()

            request.state.credentials = policy.get("credentials")

            return await call_next(request)

        # FIX: was a bare `except:`, which also swallowed
        # asyncio.CancelledError; catching Exception lets cancellation
        # propagate while still denying on real errors.
        except Exception:  # pylint: disable=broad-except
            log.warning("------------------------------------------------------")
            log.warning("Agenta SDK - handling auth middleware exception below:")
            log.warning("------------------------------------------------------")
            log.warning(format_exc().strip("\n"))
            log.warning("------------------------------------------------------")

            return Deny()


# --- agenta/sdk/middleware/cache.py (new file in the diff) ---

class TTLLRUCache:
    """A dict-backed cache combining LRU eviction with per-entry TTL expiry."""

    def __init__(self, capacity: int, ttl: int):
        # OrderedDict insertion order doubles as the LRU recency order.
        self.cache = OrderedDict()
        self.capacity = capacity
        self.ttl = ttl

    def get(self, key):
        """Return the live value for `key`, or None if absent/expired."""
        if key not in self.cache:
            return None

        value, expiry = self.cache[key]

        # TTL: drop and miss on expired entries.
        if time() > expiry:
            del self.cache[key]

            return None

        # LRU: mark as most recently used.
        self.cache.move_to_end(key)

        return value

    def put(self, key, value):
        """Insert/refresh `key`, evicting the least recently used on overflow."""
        if key in self.cache:
            del self.cache[key]
        elif len(self.cache) >= self.capacity:
            self.cache.popitem(last=False)

        self.cache[key] = (value, time() + self.ttl)
a/agenta-cli/debugging/simple-app/agenta/sdk/prompt.py b/agenta-cli/debugging/simple-app/agenta/sdk/prompt.py new file mode 100644 index 0000000000..ea447f034f --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/sdk/prompt.py @@ -0,0 +1,22 @@ +from pydantic import BaseModel, Field +from typing import Optional, List, Dict, Any + + +class Prompt(BaseModel): + """A pre-built BaseModel for prompt configuration""" + + system_message: str = Field(default="", description="System message for the prompt") + user_message: str = Field(default="", description="User message template") + temperature: float = Field( + default=0.7, ge=0.0, le=1.0, description="Temperature for text generation" + ) + max_tokens: Optional[int] = Field( + default=None, ge=1, description="Maximum number of tokens to generate" + ) + stop_sequences: Optional[List[str]] = Field( + default=None, + description="List of sequences where the model should stop generating", + ) + model_parameters: Optional[Dict[str, Any]] = Field( + default=None, description="Additional model-specific parameters" + ) diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/router.py b/agenta-cli/debugging/simple-app/agenta/sdk/router.py new file mode 100644 index 0000000000..b4cb63b59b --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/sdk/router.py @@ -0,0 +1,8 @@ +from fastapi import APIRouter + +router = APIRouter() + + +@router.get("/health") +def health(): + return {"status": "ok"} diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/tracing/__init__.py b/agenta-cli/debugging/simple-app/agenta/sdk/tracing/__init__.py new file mode 100644 index 0000000000..734c38b64d --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/sdk/tracing/__init__.py @@ -0,0 +1 @@ +from .tracing import Tracing, get_tracer diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/tracing/attributes.py b/agenta-cli/debugging/simple-app/agenta/sdk/tracing/attributes.py new file mode 100644 index 0000000000..5cf4adbff1 --- /dev/null 
# --- agenta/sdk/tracing/attributes.py (new file in the diff) ---
from json import loads, dumps
from typing import Optional, Union, Sequence, Any, Dict

Primitive = Union[str, int, float, bool, bytes]
PrimitivesSequence = Sequence[Primitive]
Attribute = Union[Primitive, PrimitivesSequence]


def _marshal(
    unmarshalled: Dict[str, Any],
    *,
    parent_key: Optional[str] = "",
    depth: Optional[int] = 0,
    max_depth: Optional[int] = None,
) -> Dict[str, Any]:
    """Flatten a nested dict into dotted keys.

    Nested dicts contribute `parent.child` keys; lists contribute
    `parent.<index>` keys. Once `max_depth` is reached the remaining
    sub-structure is stored as-is under the current key.

    Example:
        {"ag": {"node": {"children": [{"name": "c1"}]}}}
        -> {"ag.node.children.0.name": "c1"}
    """
    flat: Dict[str, Any] = {}

    # At the depth limit, stop flattening and keep the subtree whole.
    # NOTE(review): the subtree is not JSON-encoded here; `_encode_value`
    # handles that at serialization time.
    if max_depth is not None and depth >= max_depth:
        flat[parent_key] = unmarshalled

        return flat

    for key, value in unmarshalled.items():
        dotted = f"{parent_key}.{key}" if parent_key else key

        if isinstance(value, dict):
            flat.update(
                _marshal(
                    value,
                    parent_key=dotted,
                    depth=depth + 1,
                    max_depth=max_depth,
                )
            )
        elif isinstance(value, list):
            if max_depth is not None and depth + 1 >= max_depth:
                # List sits at the depth limit: keep it whole.
                flat[dotted] = value
            else:
                for index, item in enumerate(value):
                    indexed = f"{dotted}.{index}"

                    if isinstance(item, (dict, list)):
                        flat.update(
                            _marshal(
                                item,
                                parent_key=indexed,
                                depth=depth + 1,
                                max_depth=max_depth,
                            )
                        )
                    else:
                        flat[indexed] = item
        else:
            flat[dotted] = value

    return flat


def _encode_key(
    namespace: Optional[str] = None,
    key: str = "",
) -> str:
    """Prefix `key` with the `ag.<namespace>.` convention (no-op without one)."""
    return key if namespace is None else f"ag.{namespace}.{key}"


def _encode_value(
    value: Any,
) -> Optional[Attribute]:
    """Coerce a value into an OTel-compatible attribute, or None to drop it.

    Primitives pass through; dicts/lists are tagged JSON strings; anything
    else falls back to `repr`.
    """
    if value is None:
        return None

    if isinstance(value, (str, int, float, bool, bytes)):
        return value

    if isinstance(value, (dict, list)):
        return "@ag.type=json:" + dumps(value)

    return repr(value)


def serialize(
    *,
    namespace: str,
    attributes: Dict[str, Any],
    max_depth: Optional[int] = None,
) -> Dict[str, str]:
    """Flatten + namespace + encode `attributes` for span attribute storage.

    Returns an empty dict for non-dict input; entries whose encoded value
    is None are dropped.
    """
    if not isinstance(attributes, dict):
        return {}

    serialized: Dict[str, str] = {}

    for key, value in _marshal(attributes, max_depth=max_depth).items():
        encoded = _encode_value(value)

        if encoded is None:
            continue

        serialized[_encode_key(namespace, key)] = encoded

    return serialized
log.warning("----------------------------------------------") + finally: + tracing_context.reset(token) diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/tracing/conventions.py b/agenta-cli/debugging/simple-app/agenta/sdk/tracing/conventions.py new file mode 100644 index 0000000000..018cf64dea --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/sdk/tracing/conventions.py @@ -0,0 +1,49 @@ +from enum import Enum +from re import fullmatch + +from opentelemetry.trace import SpanKind + + +class Reference(str, Enum): + # + VARIANT_ID = "variant.id" + VARIANT_SLUG = "variant.slug" + VARIANT_VERSION = "variant.version" + # + ENVIRONMENT_ID = "environment.id" + ENVIRONMENT_SLUG = "environment.slug" + ENVIRONMENT_VERSION = "environment.version" + # + APPLICATION_ID = "application.id" + APPLICATION_SLUG = "application.slug" + # + + +_PATTERN = r"[A-Za-z0-9._-]+" + + +def is_valid_attribute_key( + string: str, +): + return bool(fullmatch(_PATTERN, string)) + + +def parse_span_kind(type: str) -> SpanKind: + kind = SpanKind.INTERNAL + if type in [ + "agent", + "chain", + "workflow", + ]: + kind = SpanKind.SERVER + elif type in [ + "tool", + "embedding", + "query", + "completion", + "chat", + "rerank", + ]: + kind = SpanKind.CLIENT + + return kind diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/tracing/exporters.py b/agenta-cli/debugging/simple-app/agenta/sdk/tracing/exporters.py new file mode 100644 index 0000000000..62f03a10b5 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/sdk/tracing/exporters.py @@ -0,0 +1,65 @@ +from typing import Sequence, Dict, List + +from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter +from opentelemetry.sdk.trace.export import ( + ConsoleSpanExporter, + SpanExporter, + SpanExportResult, + ReadableSpan, +) + +from agenta.sdk.utils.exceptions import suppress + + +class InlineTraceExporter(SpanExporter): + def __init__(self, registry: Dict[str, List[ReadableSpan]]): + self._shutdown = False + 
self._registry = registry + + def export( + self, + spans: Sequence[ReadableSpan], + ) -> SpanExportResult: + if self._shutdown: + return + + with suppress(): + for span in spans: + trace_id = span.get_span_context().trace_id + + if trace_id not in self._registry: + self._registry[trace_id] = [] + + self._registry[trace_id].append(span) + + def shutdown(self) -> None: + self._shutdown = True + + def force_flush(self, timeout_millis: int = 30000) -> bool: + return True + + def is_ready( + self, + trace_id: int, + ) -> bool: + is_ready = trace_id in self._registry + + return is_ready + + def fetch( + self, + trace_id: int, + ) -> List[ReadableSpan]: + trace = self._registry.get(trace_id, []) + + if trace_id in self._registry: + del self._registry[trace_id] + + return trace + + +OTLPSpanExporter._MAX_RETRY_TIMEOUT = 2 # pylint: disable=protected-access + +ConsoleExporter = ConsoleSpanExporter +InlineExporter = InlineTraceExporter +OTLPExporter = OTLPSpanExporter diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/tracing/inline.py b/agenta-cli/debugging/simple-app/agenta/sdk/tracing/inline.py new file mode 100644 index 0000000000..6905ad5cf0 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/sdk/tracing/inline.py @@ -0,0 +1,1146 @@ +############################ +### services.shared.dtos ### +### -------------------- ### + +from typing import Optional + +from pydantic import BaseModel +from uuid import UUID +from datetime import datetime +from enum import Enum +from collections import OrderedDict + + +class ProjectScopeDTO(BaseModel): + project_id: UUID + + +class LifecycleDTO(BaseModel): + created_at: datetime + updated_at: Optional[datetime] = None + + updated_by_id: Optional[UUID] = None + + +### -------------------- ### +### services.shared.dtos ### +############################ + + +################################### +### services.observability.dtos ### +### --------------------------- ### + +from typing import List, Dict, Any, Union, Optional + +from 
enum import Enum +from datetime import datetime +from uuid import UUID + + +class TimeDTO(BaseModel): + start: datetime + end: datetime + + +class StatusCode(Enum): + UNSET = "UNSET" + OK = "OK" + ERROR = "ERROR" + + +class StatusDTO(BaseModel): + code: StatusCode + message: Optional[str] = None + stacktrace: Optional[str] = None + + +AttributeValueType = Any +Attributes = Dict[str, AttributeValueType] + + +class TreeType(Enum): + # --- VARIANTS --- # + INVOCATION = "invocation" + # --- VARIANTS --- # + + +class NodeType(Enum): + # --- VARIANTS --- # + ## SPAN_KIND_SERVER + AGENT = "agent" + WORKFLOW = "workflow" + CHAIN = "chain" + ## SPAN_KIND_INTERNAL + TASK = "task" + ## SPAN_KIND_CLIENT + TOOL = "tool" + EMBEDDING = "embedding" + QUERY = "query" + COMPLETION = "completion" + CHAT = "chat" + RERANK = "rerank" + # --- VARIANTS --- # + + +class RootDTO(BaseModel): + id: UUID + + +class TreeDTO(BaseModel): + id: UUID + type: Optional[TreeType] = None + + +class NodeDTO(BaseModel): + id: UUID + type: Optional[NodeType] = None + name: str + + +Data = Dict[str, Any] +Metrics = Dict[str, Any] +Metadata = Dict[str, Any] +Tags = Dict[str, str] +Refs = Dict[str, str] + + +class LinkDTO(BaseModel): + type: str + id: UUID + tree_id: Optional[UUID] = None + + +class ParentDTO(BaseModel): + id: UUID + + +class OTelSpanKind(Enum): + SPAN_KIND_UNSPECIFIED = "SPAN_KIND_UNSPECIFIED" + # INTERNAL + SPAN_KIND_INTERNAL = "SPAN_KIND_INTERNAL" + # SYNCHRONOUS + SPAN_KIND_SERVER = "SPAN_KIND_SERVER" + SPAN_KIND_CLIENT = "SPAN_KIND_CLIENT" + # ASYNCHRONOUS + SPAN_KIND_PRODUCER = "SPAN_KIND_PRODUCER" + SPAN_KIND_CONSUMER = "SPAN_KIND_CONSUMER" + + +class OTelStatusCode(Enum): + STATUS_CODE_OK = "STATUS_CODE_OK" + STATUS_CODE_ERROR = "STATUS_CODE_ERROR" + STATUS_CODE_UNSET = "STATUS_CODE_UNSET" + + +class OTelContextDTO(BaseModel): + trace_id: str + span_id: str + + +class OTelEventDTO(BaseModel): + name: str + timestamp: datetime + + attributes: Optional[Attributes] = None + + +class 
OTelLinkDTO(BaseModel): + context: OTelContextDTO + + attributes: Optional[Attributes] = None + + +class OTelExtraDTO(BaseModel): + kind: Optional[str] = None + + attributes: Optional[Attributes] = None + events: Optional[List[OTelEventDTO]] = None + links: Optional[List[OTelLinkDTO]] = None + + +class SpanDTO(BaseModel): + scope: Optional[ProjectScopeDTO] = None + + lifecycle: Optional[LifecycleDTO] = None + + root: RootDTO + tree: TreeDTO + node: NodeDTO + + parent: Optional[ParentDTO] = None + + time: TimeDTO + status: StatusDTO + + data: Optional[Data] = None + metrics: Optional[Metrics] = None + meta: Optional[Metadata] = None + tags: Optional[Tags] = None + refs: Optional[Refs] = None + + links: Optional[List[LinkDTO]] = None + + otel: Optional[OTelExtraDTO] = None + + nodes: Optional[Dict[str, Union["SpanDTO", List["SpanDTO"]]]] = None + + +class OTelSpanDTO(BaseModel): + context: OTelContextDTO + + name: str + kind: OTelSpanKind = OTelSpanKind.SPAN_KIND_UNSPECIFIED + + start_time: datetime + end_time: datetime + + status_code: OTelStatusCode = OTelStatusCode.STATUS_CODE_UNSET + status_message: Optional[str] = None + + attributes: Optional[Attributes] = None + events: Optional[List[OTelEventDTO]] = None + + parent: Optional[OTelContextDTO] = None + links: Optional[List[OTelLinkDTO]] = None + + +### --------------------------- ### +### services.observability.dtos ### +################################### + + +#################################### +### services.observability.utils ### +### ---------------------------- ### + +from typing import List, Dict, OrderedDict + + +def parse_span_dtos_to_span_idx( + span_dtos: List[SpanDTO], +) -> Dict[str, SpanDTO]: + span_idx = {span_dto.node.id: span_dto for span_dto in span_dtos} + + return span_idx + + +def parse_span_idx_to_span_id_tree( + span_idx: Dict[str, SpanDTO], +) -> OrderedDict: + span_id_tree = OrderedDict() + index = {} + + def push(span_dto: SpanDTO) -> None: + if span_dto.parent is None: + 
span_id_tree[span_dto.node.id] = OrderedDict() + index[span_dto.node.id] = span_id_tree[span_dto.node.id] + elif span_dto.parent.id in index: + index[span_dto.parent.id][span_dto.node.id] = OrderedDict() + index[span_dto.node.id] = index[span_dto.parent.id][span_dto.node.id] + + for span_dto in sorted(span_idx.values(), key=lambda span_dto: span_dto.time.start): + push(span_dto) + + return span_id_tree + + +def cumulate_costs( + spans_id_tree: OrderedDict, + spans_idx: Dict[str, SpanDTO], +) -> None: + def _get_unit(span: SpanDTO): + if span.metrics is not None: + return span.metrics.get("unit.costs.total", 0.0) + + return 0.0 + + def _get_acc(span: SpanDTO): + if span.metrics is not None: + return span.metrics.get("acc.costs.total", 0.0) + + return 0.0 + + def _acc(a: float, b: float): + return a + b + + def _set(span: SpanDTO, cost: float): + if span.metrics is None: + span.metrics = {} + + if cost != 0.0: + span.metrics["acc.costs.total"] = cost + + _cumulate_tree_dfs(spans_id_tree, spans_idx, _get_unit, _get_acc, _acc, _set) + + +def cumulate_tokens( + spans_id_tree: OrderedDict, + spans_idx: Dict[str, dict], +) -> None: + def _get_unit(span: SpanDTO): + _tokens = { + "prompt": 0.0, + "completion": 0.0, + "total": 0.0, + } + + if span.metrics is not None: + return { + "prompt": span.metrics.get("unit.tokens.prompt", 0.0), + "completion": span.metrics.get("unit.tokens.completion", 0.0), + "total": span.metrics.get("unit.tokens.total", 0.0), + } + + return _tokens + + def _get_acc(span: SpanDTO): + _tokens = { + "prompt": 0.0, + "completion": 0.0, + "total": 0.0, + } + + if span.metrics is not None: + return { + "prompt": span.metrics.get("acc.tokens.prompt", 0.0), + "completion": span.metrics.get("acc.tokens.completion", 0.0), + "total": span.metrics.get("acc.tokens.total", 0.0), + } + + return _tokens + + def _acc(a: dict, b: dict): + return { + "prompt": a.get("prompt", 0.0) + b.get("prompt", 0.0), + "completion": a.get("completion", 0.0) + b.get("completion", 
0.0), + "total": a.get("total", 0.0) + b.get("total", 0.0), + } + + def _set(span: SpanDTO, tokens: dict): + if span.metrics is None: + span.metrics = {} + + if tokens.get("prompt", 0.0) != 0.0: + span.metrics["acc.tokens.prompt"] = tokens.get("prompt", 0.0) + if tokens.get("completion", 0.0) != 0.0: + span.metrics["acc.tokens.completion"] = ( + tokens.get("completion", 0.0) + if tokens.get("completion", 0.0) != 0.0 + else None + ) + if tokens.get("total", 0.0) != 0.0: + span.metrics["acc.tokens.total"] = ( + tokens.get("total", 0.0) if tokens.get("total", 0.0) != 0.0 else None + ) + + _cumulate_tree_dfs(spans_id_tree, spans_idx, _get_unit, _get_acc, _acc, _set) + + +def _cumulate_tree_dfs( + spans_id_tree: OrderedDict, + spans_idx: Dict[str, SpanDTO], + get_unit_metric, + get_acc_metric, + accumulate_metric, + set_metric, +): + for span_id, children_spans_id_tree in spans_id_tree.items(): + children_spans_id_tree: OrderedDict + + cumulated_metric = get_unit_metric(spans_idx[span_id]) + + _cumulate_tree_dfs( + children_spans_id_tree, + spans_idx, + get_unit_metric, + get_acc_metric, + accumulate_metric, + set_metric, + ) + + for child_span_id in children_spans_id_tree.keys(): + marginal_metric = get_acc_metric(spans_idx[child_span_id]) + cumulated_metric = accumulate_metric(cumulated_metric, marginal_metric) + + set_metric(spans_idx[span_id], cumulated_metric) + + +def connect_children( + spans_id_tree: OrderedDict, + spans_idx: Dict[str, dict], +) -> None: + _connect_tree_dfs(spans_id_tree, spans_idx) + + +def _connect_tree_dfs( + spans_id_tree: OrderedDict, + spans_idx: Dict[str, SpanDTO], +): + for span_id, children_spans_id_tree in spans_id_tree.items(): + children_spans_id_tree: OrderedDict + + parent_span = spans_idx[span_id] + + parent_span.nodes = dict() + + _connect_tree_dfs(children_spans_id_tree, spans_idx) + + for child_span_id in children_spans_id_tree.keys(): + child_span_name = spans_idx[child_span_id].node.name + if child_span_name not in 
parent_span.nodes: + parent_span.nodes[child_span_name] = spans_idx[child_span_id] + else: + if not isinstance(parent_span.nodes[child_span_name], list): + parent_span.nodes[child_span_name] = [ + parent_span.nodes[child_span_name] + ] + + parent_span.nodes[child_span_name].append(spans_idx[child_span_id]) + + if len(parent_span.nodes) == 0: + parent_span.nodes = None + + +### ---------------------------- ### +### services.observability.utils ### +#################################### + + +######################################################## +### apis.fastapi.observability.opentelemetry.semconv ### +### ------------------------------------------------ ### + +from json import loads + +VERSION = "0.4.1" + +V_0_4_1_ATTRIBUTES_EXACT = [ + # OPENLLMETRY + ("gen_ai.system", "ag.meta.system"), + ("gen_ai.request.base_url", "ag.meta.request.base_url"), + ("gen_ai.request.endpoint", "ag.meta.request.endpoint"), + ("gen_ai.request.headers", "ag.meta.request.headers"), + ("gen_ai.request.type", "ag.type.node"), + ("gen_ai.request.streaming", "ag.meta.request.streaming"), + ("gen_ai.request.model", "ag.meta.request.model"), + ("gen_ai.request.max_tokens", "ag.meta.request.max_tokens"), + ("gen_ai.request.temperature", "ag.meta.request.temperature"), + ("gen_ai.request.top_p", "ag.meta.request.top_p"), + ("gen_ai.response.model", "ag.meta.response.model"), + ("gen_ai.usage.prompt_tokens", "ag.metrics.unit.tokens.prompt"), + ("gen_ai.usage.completion_tokens", "ag.metrics.unit.tokens.completion"), + ("gen_ai.usage.total_tokens", "ag.metrics.unit.tokens.total"), + ("llm.headers", "ag.meta.request.headers"), + ("llm.request.type", "ag.type.node"), + ("llm.top_k", "ag.meta.request.top_k"), + ("llm.is_streaming", "ag.meta.request.streaming"), + ("llm.usage.total_tokens", "ag.metrics.unit.tokens.total"), + ("gen_ai.openai.api_base", "ag.meta.request.base_url"), + ("db.system", "ag.meta.system"), + ("db.vector.query.top_k", "ag.meta.request.top_k"), + ("pinecone.query.top_k", 
"ag.meta.request.top_k"), + ("traceloop.span.kind", "ag.type.node"), + ("traceloop.entity.name", "ag.node.name"), + # OPENINFERENCE + ("output.value", "ag.data.outputs"), + ("input.value", "ag.data.inputs"), + ("embedding.model_name", "ag.meta.request.model"), + ("llm.invocation_parameters", "ag.meta.request"), + ("llm.model_name", "ag.meta.request.model"), + ("llm.provider", "ag.meta.provider"), + ("llm.system", "ag.meta.system"), +] +V_0_4_1_ATTRIBUTES_PREFIX = [ + # OPENLLMETRY + ("gen_ai.prompt", "ag.data.inputs.prompt"), + ("gen_ai.completion", "ag.data.outputs.completion"), + ("llm.request.functions", "ag.data.inputs.functions"), + ("llm.request.tools", "ag.data.inputs.tools"), + # OPENINFERENCE + ("llm.token_count", "ag.metrics.unit.tokens"), + ("llm.input_messages", "ag.data.inputs.prompt"), + ("llm.output_messages", "ag.data.outputs.completion"), +] + +V_0_4_1_ATTRIBUTES_DYNAMIC = [ + # OPENLLMETRY + ("traceloop.entity.input", lambda x: ("ag.data.inputs", loads(x).get("inputs"))), + ("traceloop.entity.output", lambda x: ("ag.data.outputs", loads(x).get("outputs"))), +] + + +V_0_4_1_MAPS = { + "attributes": { + "exact": { + "from": {otel: agenta for otel, agenta in V_0_4_1_ATTRIBUTES_EXACT[::-1]}, + "to": {agenta: otel for otel, agenta in V_0_4_1_ATTRIBUTES_EXACT[::-1]}, + }, + "prefix": { + "from": {otel: agenta for otel, agenta in V_0_4_1_ATTRIBUTES_PREFIX[::-1]}, + "to": {agenta: otel for otel, agenta in V_0_4_1_ATTRIBUTES_PREFIX[::-1]}, + }, + "dynamic": { + "from": {otel: agenta for otel, agenta in V_0_4_1_ATTRIBUTES_DYNAMIC[::-1]} + }, + }, +} +V_0_4_1_KEYS = { + "attributes": { + "exact": { + "from": list(V_0_4_1_MAPS["attributes"]["exact"]["from"].keys()), + "to": list(V_0_4_1_MAPS["attributes"]["exact"]["to"].keys()), + }, + "prefix": { + "from": list(V_0_4_1_MAPS["attributes"]["prefix"]["from"].keys()), + "to": list(V_0_4_1_MAPS["attributes"]["prefix"]["to"].keys()), + }, + "dynamic": { + "from": 
list(V_0_4_1_MAPS["attributes"]["dynamic"]["from"].keys()), + }, + }, +} + + +MAPS = { + "0.4.1": V_0_4_1_MAPS, # LATEST +} +KEYS = { + "0.4.1": V_0_4_1_KEYS, # LATEST +} + +CODEX = {"maps": MAPS[VERSION], "keys": KEYS[VERSION]} + + +### ------------------------------------------------ ### +### apis.fastapi.observability.opentelemetry.semconv ### +######################################################## + + +######################################## +### apis.fastapi.observability.utils ### +### -------------------------------- ### + +from typing import Optional, Union, Tuple, Any, List, Dict +from uuid import UUID +from collections import OrderedDict +from json import loads, JSONDecodeError, dumps +from copy import copy + + +def _unmarshal_attributes( + marshalled: Dict[str, Any], +) -> Dict[str, Any]: + """ + Unmarshals a dictionary of marshalled attributes into a nested dictionary + + Example: + marshalled = { + "ag.type": "tree", + "ag.node.name": "root", + "ag.node.children.0.name": "child1", + "ag.node.children.1.name": "child2" + } + unmarshalled = { + "ag": { + "type": "tree", + "node": { + "name": "root", + "children": [ + { + "name": "child1", + }, + { + "name": "child2", + } + ] + } + } + } + """ + unmarshalled = {} + + for key, value in marshalled.items(): + keys = key.split(".") + + level = unmarshalled + + for i, part in enumerate(keys[:-1]): + if part.isdigit(): + part = int(part) + + if not isinstance(level, list): + level = [] + + while len(level) <= part: + level.append({}) + + level = level[part] + + else: + if part not in level: + level[part] = {} if not keys[i + 1].isdigit() else [] + + level = level[part] + + last_key = keys[-1] + + if last_key.isdigit(): + last_key = int(last_key) + + if not isinstance(level, list): + level = [] + + while len(level) <= last_key: + level.append(None) + + level[last_key] = value + + else: + level[last_key] = value + + return unmarshalled + + +def _encode_key( + namespace, + key: str, +) -> str: + return 
f"ag.{namespace}.{key}" + + +def _decode_key( + namespace, + key: str, +) -> str: + return key.replace(f"ag.{namespace}.", "") + + +def _decode_value( + value: Any, +) -> Any: + if isinstance(value, (int, float, bool, bytes)): + return value + + if isinstance(value, str): + if value == "@ag.type=none:": + return None + + if value.startswith("@ag.type=json:"): + encoded = value[len("@ag.type=json:") :] + value = loads(encoded) + return value + + return value + + return value + + +def _get_attributes( + attributes: Attributes, + namespace: str, +): + return { + _decode_key(namespace, key): _decode_value(value) + for key, value in attributes.items() + if key != _decode_key(namespace, key) + } + + +def _parse_from_types( + otel_span_dto: OTelSpanDTO, +) -> dict: + types = _get_attributes(otel_span_dto.attributes, "type") + + if types.get("tree"): + del otel_span_dto.attributes[_encode_key("type", "tree")] + + if types.get("node"): + del otel_span_dto.attributes[_encode_key("type", "node")] + + return types + + +def _parse_from_semconv( + attributes: Attributes, +) -> None: + _attributes = copy(attributes) + + for old_key, value in _attributes.items(): + if old_key in CODEX["keys"]["attributes"]["exact"]["from"]: + new_key = CODEX["maps"]["attributes"]["exact"]["from"][old_key] + + attributes[new_key] = value + + del attributes[old_key] + + else: + for prefix_key in CODEX["keys"]["attributes"]["prefix"]["from"]: + if old_key.startswith(prefix_key): + prefix = CODEX["maps"]["attributes"]["prefix"]["from"][prefix_key] + + new_key = old_key.replace(prefix_key, prefix) + + attributes[new_key] = value + + del attributes[old_key] + + for dynamic_key in CODEX["keys"]["attributes"]["dynamic"]["from"]: + if old_key == dynamic_key: + try: + new_key, new_value = CODEX["maps"]["attributes"]["dynamic"][ + "from" + ][dynamic_key](value) + + attributes[new_key] = new_value + + except: # pylint: disable=bare-except + pass + + +def _parse_from_links( + otel_span_dto: OTelSpanDTO, +) -> 
dict: + # TESTING + otel_span_dto.links = [ + OTelLinkDTO( + context=otel_span_dto.context, + attributes={"ag.type.link": "testcase"}, + ) + ] + # ------- + + # LINKS + links = None + otel_links = None + + if otel_span_dto.links: + links = list() + otel_links = list() + + for link in otel_span_dto.links: + _links = _get_attributes(link.attributes, "type") + + if _links: + link_type = _links.get("link") + link_tree_id = str(UUID(link.context.trace_id[2:])) + link_node_id = str( + UUID(link.context.trace_id[2 + 16 :] + link.context.span_id[2:]) + ) + + links.append( + LinkDTO( + type=link_type, + tree_id=link_tree_id, + id=link_node_id, + ) + ) + else: + otel_links.append(link) + + links = links if links else None + otel_links = otel_links if otel_links else None + + otel_span_dto.links = otel_links + + return links + + +def _parse_from_attributes( + otel_span_dto: OTelSpanDTO, +) -> Tuple[dict, dict, dict, dict, dict]: + # DATA + _data = _get_attributes(otel_span_dto.attributes, "data") + + for key in _data.keys(): + del otel_span_dto.attributes[_encode_key("data", key)] + + # _data = _unmarshal_attributes(_data) + _data = _data if _data else None + + # METRICS + _metrics = _get_attributes(otel_span_dto.attributes, "metrics") + + for key in _metrics.keys(): + del otel_span_dto.attributes[_encode_key("metrics", key)] + + # _metrics = _unmarshal_attributes(_metrics) + _metrics = _metrics if _metrics else None + + # META + _meta = _get_attributes(otel_span_dto.attributes, "meta") + + for key in _meta.keys(): + del otel_span_dto.attributes[_encode_key("meta", key)] + + # _meta = _unmarshal_attributes(_meta) + _meta = _meta if _meta else None + + # TAGS + _tags = _get_attributes(otel_span_dto.attributes, "tags") + + for key in _tags.keys(): + del otel_span_dto.attributes[_encode_key("tags", key)] + + _tags = _tags if _tags else None + + # REFS + _refs = _get_attributes(otel_span_dto.attributes, "refs") + + for key in _refs.keys(): + del 
otel_span_dto.attributes[_encode_key("refs", key)] + + _refs = _refs if _refs else None + + if len(otel_span_dto.attributes.keys()) < 1: + otel_span_dto.attributes = None + + return _data, _metrics, _meta, _tags, _refs + + +def parse_from_otel_span_dto( + otel_span_dto: OTelSpanDTO, +) -> SpanDTO: + lifecyle = LifecycleDTO( + created_at=datetime.now(), + ) + + _parse_from_semconv(otel_span_dto.attributes) + + types = _parse_from_types(otel_span_dto) + + tree_id = UUID(otel_span_dto.context.trace_id[2:]) + + tree_type: str = types.get("tree") + + tree = TreeDTO( + id=tree_id, + type=tree_type.lower() if tree_type else None, + ) + + node_id = UUID(tree_id.hex[16:] + otel_span_dto.context.span_id[2:]) + + node_type = NodeType.TASK + try: + node_type = NodeType(types.get("node", "").lower()) + except: # pylint: disable=bare-except + pass + + node = NodeDTO( + id=node_id, + type=node_type, + name=otel_span_dto.name, + ) + + parent = ( + ParentDTO( + id=( + UUID( + otel_span_dto.parent.trace_id[2 + 16 :] + + otel_span_dto.parent.span_id[2:] + ) + ) + ) + if otel_span_dto.parent + else None + ) + + time = TimeDTO( + start=otel_span_dto.start_time, + end=otel_span_dto.end_time, + ) + + status = StatusDTO( + code=otel_span_dto.status_code.value.replace("STATUS_CODE_", ""), + message=otel_span_dto.status_message, + ) + + links = _parse_from_links(otel_span_dto) + + data, metrics, meta, tags, refs = _parse_from_attributes(otel_span_dto) + + duration = (otel_span_dto.end_time - otel_span_dto.start_time).total_seconds() + + if metrics is None: + metrics = dict() + + metrics["acc.duration.total"] = round(duration * 1_000, 3) # milliseconds + + root_id = str(tree_id) + if refs is not None: + root_id = refs.get("scenario.id", root_id) + + root = RootDTO(id=UUID(root_id)) + + otel = OTelExtraDTO( + kind=otel_span_dto.kind.value, + attributes=otel_span_dto.attributes, + events=otel_span_dto.events, + links=otel_span_dto.links, + ) + + span_dto = SpanDTO( + lifecycle=lifecyle, + 
root=root, + tree=tree, + node=node, + parent=parent, + time=time, + status=status, + data=data, + metrics=metrics, + meta=meta, + tags=tags, + refs=refs, + links=links, + otel=otel, + ) + + return span_dto + + +def parse_to_agenta_span_dto( + span_dto: SpanDTO, +) -> SpanDTO: + # DATA + if span_dto.data: + span_dto.data = _unmarshal_attributes(span_dto.data) + + if "outputs" in span_dto.data: + if "__default__" in span_dto.data["outputs"]: + span_dto.data["outputs"] = span_dto.data["outputs"]["__default__"] + + # METRICS + if span_dto.metrics: + span_dto.metrics = _unmarshal_attributes(span_dto.metrics) + + # META + if span_dto.meta: + span_dto.meta = _unmarshal_attributes(span_dto.meta) + + # TAGS + if span_dto.tags: + span_dto.tags = _unmarshal_attributes(span_dto.tags) + + # REFS + if span_dto.refs: + span_dto.refs = _unmarshal_attributes(span_dto.refs) + + for link in span_dto.links: + link.tree_id = None + + if span_dto.nodes: + for v in span_dto.nodes.values(): + if isinstance(v, list): + for n in v: + parse_to_agenta_span_dto(n) + else: + parse_to_agenta_span_dto(v) + + # MASK LINKS FOR NOW + span_dto.links = None + # ------------------ + + # MASK LIFECYCLE FOR NOW + # span_dto.lifecycle = None + if span_dto.lifecycle: + span_dto.lifecycle.updated_at = None + span_dto.lifecycle.updated_by_id = None + # ---------------------- + + return span_dto + + +### -------------------------------- ### +### apis.fastapi.observability.utils ### +######################################## + + +from litellm import cost_calculator +from opentelemetry.sdk.trace import ReadableSpan + +from agenta.sdk.types import AgentaNodeDto, AgentaNodesResponse + + +def parse_inline_trace( + spans: Dict[str, ReadableSpan], +): + otel_span_dtos = _parse_readable_spans(spans) + + ############################################################ + ### apis.fastapi.observability.api.otlp_collect_traces() ### + ### ---------------------------------------------------- ### + span_dtos = [ + 
parse_from_otel_span_dto(otel_span_dto) for otel_span_dto in otel_span_dtos + ] + ### ---------------------------------------------------- ### + ### apis.fastapi.observability.api.otlp_collect_traces() ### + ############################################################ + + ##################################################### + ### services.observability.service.ingest/query() ### + ### --------------------------------------------- ### + span_idx = parse_span_dtos_to_span_idx(span_dtos) + span_id_tree = parse_span_idx_to_span_id_tree(span_idx) + ### --------------------------------------------- ### + ### services.observability.service.ingest/query() ### + ##################################################### + + ############################################### + ### services.observability.service.ingest() ### + ### --------------------------------------- ### + calculate_costs(span_idx) + cumulate_costs(span_id_tree, span_idx) + cumulate_tokens(span_id_tree, span_idx) + ### --------------------------------------- ### + ### services.observability.service.ingest() ### + ############################################### + + ############################################## + ### services.observability.service.query() ### + ### -------------------------------------- ### + connect_children(span_id_tree, span_idx) + root_span_dtos = [span_idx[span_id] for span_id in span_id_tree.keys()] + agenta_span_dtos = [ + parse_to_agenta_span_dto(span_dto) for span_dto in root_span_dtos + ] + ### -------------------------------------- ### + ### services.observability.service.query() ### + ############################################## + + spans = [ + loads( + span_dto.model_dump_json( + exclude_none=True, + exclude_defaults=True, + ) + ) + for span_dto in agenta_span_dtos + ] + inline_trace = AgentaNodesResponse( + version="1.0.0", + nodes=[AgentaNodeDto(**span) for span in spans], + ).model_dump(exclude_none=True, exclude_unset=True) + return inline_trace + + +def _parse_readable_spans( + 
spans: List[ReadableSpan], +) -> List[OTelSpanDTO]: + otel_span_dtos = list() + + for span in spans: + otel_span_dto = OTelSpanDTO( + context=OTelContextDTO( + trace_id=_int_to_hex(span.get_span_context().trace_id, 128), + span_id=_int_to_hex(span.get_span_context().span_id, 64), + ), + name=span.name, + kind=OTelSpanKind( + "SPAN_KIND_" + + (span.kind if isinstance(span.kind, str) else span.kind.name) + ), + start_time=_timestamp_ns_to_datetime(span.start_time), + end_time=_timestamp_ns_to_datetime(span.end_time), + status_code=OTelStatusCode("STATUS_CODE_" + span.status.status_code.name), + status_message=span.status.description, + attributes=span.attributes, + events=[ + OTelEventDTO( + name=event.name, + timestamp=_timestamp_ns_to_datetime(event.timestamp), + attributes=event.attributes, + ) + for event in span.events + ], + parent=( + OTelContextDTO( + trace_id=_int_to_hex(span.parent.trace_id, 128), + span_id=_int_to_hex(span.parent.span_id, 64), + ) + if span.parent + else None + ), + links=[ + OTelLinkDTO( + context=OTelContextDTO( + trace_id=_int_to_hex(link.context.trace_id, 128), + span_id=_int_to_hex(link.context.span_id, 64), + ), + attributes=link.attributes, + ) + for link in span.links + ], + ) + + otel_span_dtos.append(otel_span_dto) + + return otel_span_dtos + + +def _int_to_hex(integer, bits): + _hex = hex(integer)[2:] + + _hex = _hex.zfill(bits // 4) + + _hex = "0x" + _hex + + return _hex + + +def _timestamp_ns_to_datetime(timestamp_ns): + _datetime = datetime.fromtimestamp( + timestamp_ns / 1_000_000_000, + ).isoformat( + timespec="microseconds", + ) + + return _datetime + + +class LlmTokens(BaseModel): + prompt_tokens: Optional[int] = 0 + completion_tokens: Optional[int] = 0 + total_tokens: Optional[int] = 0 + + +TYPES_WITH_COSTS = [ + "embedding", + "query", + "completion", + "chat", + "rerank", +] + + +def calculate_costs(span_idx: Dict[str, SpanDTO]): + for span in span_idx.values(): + if ( + span.node.type + and span.node.type.name.lower() 
in TYPES_WITH_COSTS + and span.meta + and span.metrics + ): + model = span.meta.get("response.model") + prompt_tokens = span.metrics.get("unit.tokens.prompt", 0.0) + completion_tokens = span.metrics.get("unit.tokens.completion", 0.0) + + try: + costs = cost_calculator.cost_per_token( + model=model, + prompt_tokens=prompt_tokens, + completion_tokens=completion_tokens, + ) + + if not costs: + continue + + prompt_cost, completion_cost = costs + total_cost = prompt_cost + completion_cost + + span.metrics["unit.costs.prompt"] = prompt_cost + span.metrics["unit.costs.completion"] = completion_cost + span.metrics["unit.costs.total"] = total_cost + + except: # pylint: disable=bare-except + pass diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/tracing/processors.py b/agenta-cli/debugging/simple-app/agenta/sdk/tracing/processors.py new file mode 100644 index 0000000000..b5d04d8085 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/sdk/tracing/processors.py @@ -0,0 +1,113 @@ +from typing import Optional, Dict, List + +from opentelemetry.context import Context +from opentelemetry.sdk.trace import Span +from opentelemetry.sdk.trace.export import ( + SpanExporter, + ReadableSpan, + BatchSpanProcessor, + _DEFAULT_MAX_QUEUE_SIZE, + _DEFAULT_MAX_EXPORT_BATCH_SIZE, +) + +from agenta.sdk.utils.logging import log + +# LOAD CONTEXT, HERE ! 
class TraceProcessor(BatchSpanProcessor):
    """Batch span processor that groups finished spans per trace.

    Spans are buffered as they end and a trace's spans are pushed to the
    export queue only once every span belonging to that trace has ended
    (tracked via an internal registry of still-open span ids).
    """

    def __init__(
        self,
        span_exporter: SpanExporter,
        references: Dict[str, str] = None,
        max_queue_size: int = None,
        schedule_delay_millis: float = None,
        max_export_batch_size: int = None,
        export_timeout_millis: float = None,
    ):
        # NOTE(review): the four sizing parameters above are accepted but
        # ignored — super().__init__ is called with hard-coded values.
        super().__init__(
            span_exporter,
            _DEFAULT_MAX_QUEUE_SIZE,
            12 * 60 * 60 * 1000,  # schedule delay: 12 hours
            _DEFAULT_MAX_EXPORT_BATCH_SIZE,
            500,  # export timeout: < 1 second (0.5 seconds)
        )

        # trace_id -> {span_id: True} for spans that have started but not ended
        self._registry = dict()
        self._exporter = span_exporter
        self.references = references or dict()
        # finished-but-not-yet-exported spans, grouped by trace id
        self.spans: Dict[int, List[ReadableSpan]] = dict()

    def on_start(
        self,
        span: Span,
        parent_context: Optional[Context] = None,
    ) -> None:
        """Stamp configured references onto the span and register it as open."""
        for key in self.references.keys():
            span.set_attribute(f"ag.refs.{key}", self.references[key])

        if span.context.trace_id not in self._registry:
            self._registry[span.context.trace_id] = dict()

        self._registry[span.context.trace_id][span.context.span_id] = True

    def on_end(
        self,
        span: ReadableSpan,
    ):
        """Buffer the finished span; once its trace has no open spans left,
        push the whole trace onto the export queue."""
        if self.done:
            return

        if span.context.trace_id not in self.spans:
            self.spans[span.context.trace_id] = list()

        self.spans[span.context.trace_id].append(span)

        # assumes on_start() was called for this span — TODO confirm this
        # holds for all SDK span lifecycles, otherwise this raises KeyError
        del self._registry[span.context.trace_id][span.context.span_id]

        if len(self._registry[span.context.trace_id]) == 0:
            self.export(span.context.trace_id)

    def export(
        self,
        trace_id: int,
    ):
        """Move all buffered spans of `trace_id` onto the batch queue and
        wake the worker thread."""
        spans = self.spans[trace_id]

        for span in spans:
            self.queue.appendleft(span)

        with self.condition:
            self.condition.notify()

        del self.spans[trace_id]

    def force_flush(
        self,
        timeout_millis: int = None,
    ) -> bool:
        """Flush pending spans; returns False (and warns) on timeout."""
        ret = super().force_flush(timeout_millis)

        if not ret:
            log.warning("Agenta SDK - skipping export due to timeout.")

        # BUGFIX: the original fell through and implicitly returned None,
        # violating the declared -> bool contract; propagate the result.
        return ret

    def is_ready(
        self,
        trace_id: Optional[int] = None,
    ) -> bool:
        """Best-effort readiness check, delegated to the exporter when it
        supports `is_ready`; defaults to True otherwise."""
        is_ready = True

        try:
            is_ready = self._exporter.is_ready(trace_id)
        except:  # pylint: disable=bare-except
            pass

        return is_ready

    def fetch(
        self,
        trace_id: Optional[int] = None,
    ) -> Dict[str, ReadableSpan]:
        """Fetch (and consume) the spans of `trace_id` from the exporter."""
        trace = self._exporter.fetch(trace_id)  # type: ignore

        return trace


# --- original patch boundary: new file agenta/sdk/tracing/spans.py --- #

from typing import Optional, Union, Any, Dict

from opentelemetry.trace import SpanContext
from opentelemetry.trace.status import Status, StatusCode
from opentelemetry.sdk.trace import Span

from agenta.sdk.tracing.attributes import serialize


class CustomSpan(Span):  # INHERITANCE FOR TYPING ONLY
    """Wrapper around an SDK Span that serializes attribute dictionaries
    (optionally under a namespace) before storing them on the wrapped span.

    All reads/writes are proxied to the wrapped `_span`; the superclass
    constructor is invoked only so the wrapper type-checks as a Span.
    """

    def __init__(
        self,
        span: Span,
    ) -> None:
        super().__init__(  # INHERITANCE FOR TYPING ONLY
            name=span.name,
            context=span.context,
            parent=span.parent,
            sampler=span._sampler,
            trace_config=span._trace_config,
            resource=span.resource,
            attributes=span.attributes,
            events=span.events,
            links=span.links,
            kind=span.kind,
            span_processor=span._span_processor,
            instrumentation_info=span.instrumentation_info,
            record_exception=span._record_exception,
            set_status_on_exception=span._set_status_on_exception,
            limits=span._limits,
            instrumentation_scope=span.instrumentation_scope,
        )

        self._span = span

    ## --- PROXY METHODS --- ##

    def get_span_context(self):
        return self._span.get_span_context()

    def is_recording(self) -> bool:
        return self._span.is_recording()

    def update_name(
        self,
        name: str,
    ) -> None:
        self._span.update_name(name)

    def set_status(
        self,
        status: Union[Status, StatusCode],
        description: Optional[str] = None,
    ) -> None:
        self._span.set_status(
            status=status,
            description=description,
        )

    def end(self) -> None:
        self._span.end()

    ## --- CUSTOM METHODS W/ ATTRIBUTES SERIALIZATION --- ##

    def set_attributes(
        self,
        attributes: Dict[str, Any],
        namespace: Optional[str] = None,
        max_depth: Optional[int] = None,
    ) -> None:
        # serialize() flattens/encodes the dict under the namespace before
        # handing it to the wrapped span
        self._span.set_attributes(
            attributes=serialize(
                namespace=namespace,
                attributes=attributes,
                max_depth=max_depth,
            )
        )

    def set_attribute(
        self,
        key: str,
        value: Any,
        namespace: Optional[str] = None,
    ) -> None:
        self.set_attributes(
            attributes={key: value},
            namespace=namespace,
        )

    def add_event(
        self,
        name: str,
        attributes: Optional[Dict[str, Any]] = None,
        timestamp: Optional[int] = None,
        namespace: Optional[str] = None,
    ) -> None:
        self._span.add_event(
            name=name,
            attributes=serialize(
                namespace=namespace,
                attributes=attributes,
            ),
            timestamp=timestamp,
        )

    def add_link(
        self,
        context: SpanContext,
        attributes: Optional[Dict[str, Any]] = None,
        namespace: Optional[str] = None,
    ) -> None:
        self._span.add_link(
            context=context,
            attributes=serialize(
                namespace=namespace,
                attributes=attributes,
            ),
        )

    def record_exception(
        self,
        exception: BaseException,
        attributes: Optional[Dict[str, Any]] = None,
        timestamp: Optional[int] = None,
        escaped: bool = False,
        namespace: Optional[str] = None,
    ) -> None:
        self._span.record_exception(
            exception=exception,
            attributes=serialize(
                namespace=namespace,
                attributes=attributes,
            ),
            timestamp=timestamp,
            escaped=escaped,
        )


# --- original patch boundary: new file agenta/sdk/tracing/tracing.py --- #

from typing import Optional, Any, Dict, Callable
from enum import Enum

from httpx import get as check

from opentelemetry.trace import (
    get_current_span,
    set_tracer_provider,
    get_tracer_provider,
    Status,
    StatusCode,
)
from opentelemetry.sdk.trace import Span, Tracer, TracerProvider
from opentelemetry.sdk.resources import Resource
+ +from agenta.sdk.utils.singleton import Singleton +from agenta.sdk.utils.exceptions import suppress +from agenta.sdk.utils.logging import log +from agenta.sdk.tracing.processors import TraceProcessor +from agenta.sdk.tracing.exporters import InlineExporter, OTLPExporter +from agenta.sdk.tracing.spans import CustomSpan +from agenta.sdk.tracing.inline import parse_inline_trace +from agenta.sdk.tracing.conventions import Reference, is_valid_attribute_key + + +class Tracing(metaclass=Singleton): + VERSION = "0.1.0" + + Status = Status + StatusCode = StatusCode + + def __init__( + self, + url: str, + redact: Optional[Callable[..., Any]] = None, + redact_on_error: Optional[bool] = True, + ) -> None: + # ENDPOINT (OTLP) + self.otlp_url = url + # HEADERS (OTLP) + self.headers: Dict[str, str] = dict() + # REFERENCES + self.references: Dict[str, str] = dict() + + # TRACER PROVIDER + self.tracer_provider: Optional[TracerProvider] = None + # TRACE PROCESSORS -- INLINE + self.inline: Optional[TraceProcessor] = None + # TRACER + self.tracer: Optional[Tracer] = None + # INLINE SPANS for INLINE TRACES (INLINE PROCESSOR) + self.inline_spans: Dict[str, Any] = dict() + + # REDACT + self.redact = redact + self.redact_on_error = redact_on_error + + # PUBLIC + + def configure( + self, + api_key: Optional[str] = None, + # DEPRECATING + app_id: Optional[str] = None, + ): + # HEADERS (OTLP) + if api_key: + self.headers["Authorization"] = api_key + # REFERENCES + if app_id: + self.references["application.id"] = app_id + + # TRACER PROVIDER + self.tracer_provider = TracerProvider( + resource=Resource(attributes={"service.name": "agenta-sdk"}) + ) + # TRACE PROCESSORS -- INLINE + self.inline = TraceProcessor( + InlineExporter( + registry=self.inline_spans, + ), + references=self.references, + ) + self.tracer_provider.add_span_processor(self.inline) + # TRACE PROCESSORS -- OTLP + try: + log.info("--------------------------------------------") + log.info( + "Agenta SDK - connecting to otlp 
receiver at: %s", + self.otlp_url, + ) + log.info("--------------------------------------------") + check( + self.otlp_url, + headers=self.headers, + timeout=1, + ) + + _otlp = TraceProcessor( + OTLPExporter( + endpoint=self.otlp_url, + headers=self.headers, + ), + references=self.references, + ) + + self.tracer_provider.add_span_processor(_otlp) + log.info("Success: traces will be exported.") + log.info("--------------------------------------------") + except: # pylint: disable=bare-except + log.warning("Agenta SDK - traces will not be exported.") + + # GLOBAL TRACER PROVIDER -- INSTRUMENTATION LIBRARIES + set_tracer_provider(self.tracer_provider) + # TRACER + self.tracer: Tracer = self.tracer_provider.get_tracer("agenta.tracer") + + def get_current_span(self): + _span = None + + with suppress(): + _span = get_current_span() + + if _span.is_recording(): + return CustomSpan(_span) + + return _span + + def store_internals( + self, + attributes: Dict[str, Any], + span: Optional[Span] = None, + ): + with suppress(): + if span is None: + span = self.get_current_span() + + span.set_attributes( + attributes={"internals": attributes}, + namespace="data", + ) + + def store_refs( + self, + refs: Dict[str, str], + span: Optional[Span] = None, + ): + with suppress(): + if span is None: + span = self.get_current_span() + + for key in refs.keys(): + if key in [_.value for _ in Reference.__members__.values()]: + # ADD REFERENCE TO THIS SPAN + span.set_attribute( + key.value if isinstance(key, Enum) else key, + refs[key], + namespace="refs", + ) + + # AND TO ALL SPANS CREATED AFTER THIS ONE + self.references[key] = refs[key] + # TODO: THIS SHOULD BE REPLACED BY A TRACE CONTEXT !!! 
+ + def store_meta( + self, + meta: Dict[str, Any], + span: Optional[Span] = None, + ): + with suppress(): + if span is None: + span = self.get_current_span() + + for key in meta.keys(): + if is_valid_attribute_key(key): + span.set_attribute( + key, + meta[key], + namespace="meta", + ) + + def store_metrics( + self, + metrics: Dict[str, Any], + span: Optional[Span] = None, + ): + with suppress(): + if span is None: + span = self.get_current_span() + + for key in metrics.keys(): + if is_valid_attribute_key(key): + span.set_attribute( + key, + metrics[key], + namespace="metrics", + ) + + def is_inline_trace_ready( + self, + trace_id: Optional[int] = None, + ) -> bool: + is_ready = True + + with suppress(): + if trace_id is not None: + is_ready = self.inline.is_ready(trace_id) + + return is_ready + + def get_inline_trace( + self, + trace_id: Optional[int] = None, + ) -> Dict[str, Any]: + _inline_trace = {} + + with suppress(): + is_ready = self.inline.is_ready(trace_id) + + if is_ready is True: + otel_spans = self.inline.fetch(trace_id) + + if otel_spans: + _inline_trace = parse_inline_trace(otel_spans) + + return _inline_trace + + +def get_tracer( + tracing: Tracing, +) -> Tracer: + if tracing is None or tracing.tracer is None or tracing.tracer_provider is None: + return get_tracer_provider().get_tracer("default.tracer") + + return tracing.tracer diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/types.py b/agenta-cli/debugging/simple-app/agenta/sdk/types.py new file mode 100644 index 0000000000..cefe92825a --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/sdk/types.py @@ -0,0 +1,250 @@ +import json +from dataclasses import dataclass +from typing import Dict, List, Optional, Any, Union + +from pydantic import ConfigDict, BaseModel, HttpUrl + +from agenta.client.backend.types.agenta_node_dto import AgentaNodeDto +from agenta.client.backend.types.agenta_nodes_response import AgentaNodesResponse + + +@dataclass +class MultipleChoice: + choices: 
Union[List[str], Dict[str, List[str]]] + + +class InFile: + def __init__(self, file_name: str, file_path: str): + self.file_name = file_name + self.file_path = file_path + + +class LLMTokenUsage(BaseModel): + completion_tokens: int + prompt_tokens: int + total_tokens: int + + +class BaseResponse(BaseModel): + version: Optional[str] = "3.0" + data: Optional[Union[str, Dict[str, Any]]] = None + tree: Optional[AgentaNodesResponse] = None + + +class DictInput(dict): + def __new__(cls, default_keys: Optional[List[str]] = None): + instance = super().__new__(cls, default_keys) + if default_keys is None: + default_keys = [] + instance.data = [key for key in default_keys] # type: ignore + return instance + + @classmethod + def __schema_type_properties__(cls) -> dict: + return {"x-parameter": "dict"} + + +class TextParam(str): + @classmethod + def __schema_type_properties__(cls) -> dict: + return {"x-parameter": "text", "type": "string"} + + +class BinaryParam(int): + def __new__(cls, value: bool = False): + instance = super().__new__(cls, int(value)) + instance.default = value # type: ignore + return instance + + @classmethod + def __schema_type_properties__(cls) -> dict: + return { + "x-parameter": "bool", + "type": "boolean", + } + + +class IntParam(int): + def __new__(cls, default: int = 6, minval: float = 1, maxval: float = 10): + instance = super().__new__(cls, default) + instance.minval = minval # type: ignore + instance.maxval = maxval # type: ignore + return instance + + @classmethod + def __schema_type_properties__(cls) -> dict: + return {"x-parameter": "int", "type": "integer"} + + +class FloatParam(float): + def __new__(cls, default: float = 0.5, minval: float = 0.0, maxval: float = 1.0): + instance = super().__new__(cls, default) + instance.default = default # type: ignore + instance.minval = minval # type: ignore + instance.maxval = maxval # type: ignore + return instance + + @classmethod + def __schema_type_properties__(cls) -> dict: + return {"x-parameter": 
"float", "type": "number"} + + +class MultipleChoiceParam(str): + def __new__( + cls, default: Optional[str] = None, choices: Optional[List[str]] = None + ): + if default is not None and type(default) is list: + raise ValueError( + "The order of the parameters for MultipleChoiceParam is wrong! It's MultipleChoiceParam(default, choices) and not the opposite" + ) + + if not default and choices is not None: + # if a default value is not provided, + # set the first value in the choices list + default = choices[0] + + if default is None and not choices: + # raise error if no default value or choices is provided + raise ValueError("You must provide either a default value or choices") + + instance = super().__new__(cls, default) + instance.choices = choices # type: ignore + instance.default = default # type: ignore + return instance + + @classmethod + def __schema_type_properties__(cls) -> dict: + return {"x-parameter": "choice", "type": "string", "enum": []} + + +class GroupedMultipleChoiceParam(str): + def __new__( + cls, + default: Optional[str] = None, + choices: Optional[Dict[str, List[str]]] = None, + ): + if choices is None: + choices = {} + if default and not any( + default in choice_list for choice_list in choices.values() + ): + if not choices: + print( + f"Warning: Default value {default} provided but choices are empty." + ) + else: + raise ValueError( + f"Default value {default} is not in the provided choices" + ) + + if not default: + default_selected_choice = next( + (choices for choices in choices.values()), None + ) + if default_selected_choice: + default = default_selected_choice[0] + + instance = super().__new__(cls, default) + instance.choices = choices # type: ignore + instance.default = default # type: ignore + return instance + + @classmethod + def __schema_type_properties__(cls) -> dict: + return { + "x-parameter": "grouped_choice", + "type": "string", + } + + +class MessagesInput(list): + """Messages Input for Chat-completion. 
+ + Args: + messages (List[Dict[str, str]]): The list of messages inputs. + Required. Each message should be a dictionary with "role" and "content" keys. + + Raises: + ValueError: If `messages` is not specified or empty. + + """ + + def __new__(cls, messages: List[Dict[str, str]] = []): + instance = super().__new__(cls) + instance.default = messages # type: ignore + return instance + + @classmethod + def __schema_type_properties__(cls) -> dict: + return {"x-parameter": "messages", "type": "array"} + + +class FileInputURL(HttpUrl): + def __new__(cls, url: str): + instance = super().__new__(cls, url) + instance.default = url # type: ignore + return instance + + @classmethod + def __schema_type_properties__(cls) -> dict: + return {"x-parameter": "file_url", "type": "string"} + + +class Context(BaseModel): + model_config = ConfigDict(extra="allow") + + def to_json(self): + return self.model_dump() + + @classmethod + def from_json(cls, json_str: str): + data = json.loads(json_str) + return cls(**data) + + +class ReferencesResponse(BaseModel): + app_id: Optional[str] = None + app_slug: Optional[str] = None + variant_id: Optional[str] = None + variant_slug: Optional[str] = None + variant_version: Optional[int] = None + environment_id: Optional[str] = None + environment_slug: Optional[str] = None + environment_version: Optional[int] = None + + def __str__(self): + return str(self.model_dump(exclude_none=True)) + + +class LifecyclesResponse(ReferencesResponse): + committed_at: Optional[str] = None + committed_by: Optional[str] = None + committed_by_id: Optional[str] = None + deployed_at: Optional[str] = None + deployed_by: Optional[str] = None + deployed_by_id: Optional[str] = None + + def __str__(self): + return self.model_dump_json(indent=4) + + def __repr__(self): + return self.__str__() + + +class ConfigurationResponse(LifecyclesResponse): + params: Dict[str, Any] + + +class DeploymentResponse(LifecyclesResponse): + pass + + +class Prompt(BaseModel): + temperature: 
float + model: str + max_tokens: int + prompt_system: str + prompt_user: str + top_p: float + frequency_penalty: float + presence_penalty: float diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/utils/__init__.py b/agenta-cli/debugging/simple-app/agenta/sdk/utils/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/utils/costs.py b/agenta-cli/debugging/simple-app/agenta/sdk/utils/costs.py new file mode 100644 index 0000000000..667ae27cda --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/sdk/utils/costs.py @@ -0,0 +1,169 @@ +# https://raw.githubusercontent.com/langchain-ai/langchain/23eb480c3866db8693a3a2d63b787c898c54bb35/libs/community/langchain_community/callbacks/openai_info.py +MODEL_COST_PER_1K_TOKENS = { + # GPT-4 input + "gpt-4": 0.03, + "gpt-4-0314": 0.03, + "gpt-4-0613": 0.03, + "gpt-4-32k": 0.06, + "gpt-4-32k-0314": 0.06, + "gpt-4-32k-0613": 0.06, + "gpt-4-vision-preview": 0.01, + "gpt-4-1106-preview": 0.01, + # GPT-4 output + "gpt-4-completion": 0.06, + "gpt-4-0314-completion": 0.06, + "gpt-4-0613-completion": 0.06, + "gpt-4-32k-completion": 0.12, + "gpt-4-32k-0314-completion": 0.12, + "gpt-4-32k-0613-completion": 0.12, + "gpt-4-vision-preview-completion": 0.03, + "gpt-4-1106-preview-completion": 0.03, + # GPT-3.5 input + "gpt-3.5-turbo": 0.0015, + "gpt-3.5-turbo-0301": 0.0015, + "gpt-3.5-turbo-0613": 0.0015, + "gpt-3.5-turbo-1106": 0.001, + "gpt-3.5-turbo-instruct": 0.0015, + "gpt-3.5-turbo-16k": 0.003, + "gpt-3.5-turbo-16k-0613": 0.003, + # GPT-3.5 output + "gpt-3.5-turbo-completion": 0.002, + "gpt-3.5-turbo-0301-completion": 0.002, + "gpt-3.5-turbo-0613-completion": 0.002, + "gpt-3.5-turbo-1106-completion": 0.002, + "gpt-3.5-turbo-instruct-completion": 0.002, + "gpt-3.5-turbo-16k-completion": 0.004, + "gpt-3.5-turbo-16k-0613-completion": 0.004, + # Azure GPT-35 input + "gpt-35-turbo": 0.0015, # Azure OpenAI version of ChatGPT + "gpt-35-turbo-0301": 0.0015, # Azure OpenAI 
version of ChatGPT + "gpt-35-turbo-0613": 0.0015, + "gpt-35-turbo-instruct": 0.0015, + "gpt-35-turbo-16k": 0.003, + "gpt-35-turbo-16k-0613": 0.003, + # Azure GPT-35 output + "gpt-35-turbo-completion": 0.002, # Azure OpenAI version of ChatGPT + "gpt-35-turbo-0301-completion": 0.002, # Azure OpenAI version of ChatGPT + "gpt-35-turbo-0613-completion": 0.002, + "gpt-35-turbo-instruct-completion": 0.002, + "gpt-35-turbo-16k-completion": 0.004, + "gpt-35-turbo-16k-0613-completion": 0.004, + # Others + "text-embedding-ada-002": 0.1, + "text-ada-002": 0.1, + "adav2": 0.1, + "text-ada-001": 0.0004, + "ada": 0.0004, + "text-babbage-001": 0.0005, + "babbage": 0.0005, + "text-curie-001": 0.002, + "curie": 0.002, + "text-davinci-003": 0.02, + "text-davinci-002": 0.02, + "code-davinci-002": 0.02, + # Fine Tuned input + "babbage-002-finetuned": 0.0016, + "davinci-002-finetuned": 0.012, + "gpt-3.5-turbo-0613-finetuned": 0.012, + # Fine Tuned output + "babbage-002-finetuned-completion": 0.0016, + "davinci-002-finetuned-completion": 0.012, + "gpt-3.5-turbo-0613-finetuned-completion": 0.016, + # Azure Fine Tuned input + "babbage-002-azure-finetuned": 0.0004, + "davinci-002-azure-finetuned": 0.002, + "gpt-35-turbo-0613-azure-finetuned": 0.0015, + # Azure Fine Tuned output + "babbage-002-azure-finetuned-completion": 0.0004, + "davinci-002-azure-finetuned-completion": 0.002, + "gpt-35-turbo-0613-azure-finetuned-completion": 0.002, + # Legacy fine-tuned models + "ada-finetuned-legacy": 0.0016, + "babbage-finetuned-legacy": 0.0024, + "curie-finetuned-legacy": 0.012, + "davinci-finetuned-legacy": 0.12, +} + + +def standardize_model_name( + model_name: str, + is_completion: bool = False, +) -> str: + """ + Standardize the model name to a format that can be used in the OpenAI API. + + Args: + model_name: Model name to standardize. + is_completion: Whether the model is used for completion or not. + Defaults to False. + + Returns: + Standardized model name. 
+ """ + + model_name = model_name.lower() + if ".ft-" in model_name: + model_name = model_name.split(".ft-")[0] + "-azure-finetuned" + if ":ft-" in model_name: + model_name = model_name.split(":")[0] + "-finetuned-legacy" + if "ft:" in model_name: + model_name = model_name.split(":")[1] + "-finetuned" + if is_completion and ( + model_name.startswith("gpt-4") + or model_name.startswith("gpt-3.5") + or model_name.startswith("gpt-35") + or ("finetuned" in model_name and "legacy" not in model_name) + ): + return model_name + "-completion" + else: + return model_name + + +def get_openai_token_cost_for_model( + model_name: str, num_tokens: int, is_completion: bool = False +) -> float: + """ + Get the cost in USD for a given model and number of tokens. + + Args: + model_name: Name of the model + num_tokens: Number of tokens. + is_completion: Whether the model is used for completion or not. + Defaults to False. + + Returns: + Cost in USD. + """ + + model_name = standardize_model_name(model_name, is_completion=is_completion) + if model_name not in MODEL_COST_PER_1K_TOKENS: + raise ValueError( + f"Unknown model: {model_name}. Please provide a valid OpenAI model name." + "Known models are: " + ", ".join(MODEL_COST_PER_1K_TOKENS.keys()) + ) + return MODEL_COST_PER_1K_TOKENS[model_name] * (num_tokens / 1000) + + +def calculate_token_usage(model_name: str, token_usage: dict) -> float: + """Calculates the total cost of using a language model based on the model name and token + usage. + + Args: + model_name: The name of the model used to determine the cost per token. + token_usage: Contains information about the usage of tokens for a particular model. + + Returns: + Total cost of using a model. 
+ """ + + completion_tokens = token_usage.get("completion_tokens", 0) + prompt_tokens = token_usage.get("prompt_tokens", 0) + model_name = standardize_model_name(model_name) + if model_name in MODEL_COST_PER_1K_TOKENS: + completion_cost = get_openai_token_cost_for_model( + model_name, completion_tokens, is_completion=True + ) + prompt_cost = get_openai_token_cost_for_model(model_name, prompt_tokens) + total_cost = prompt_cost + completion_cost + return total_cost + return 0 diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/utils/exceptions.py b/agenta-cli/debugging/simple-app/agenta/sdk/utils/exceptions.py new file mode 100644 index 0000000000..a451b1de78 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/sdk/utils/exceptions.py @@ -0,0 +1,58 @@ +from contextlib import AbstractContextManager +from traceback import format_exc +from functools import wraps +from inspect import iscoroutinefunction + +from agenta.sdk.utils.logging import log + + +class suppress(AbstractContextManager): # pylint: disable=invalid-name + def __init__(self): + pass + + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_value, exc_tb): + if exc_type is None: + return True + else: + log.warning("-------------------------------------------------") + log.warning("Agenta SDK - suppressing tracing exception below:") + log.warning("-------------------------------------------------") + log.warning(format_exc().strip("\n")) + log.warning("-------------------------------------------------") + return True + + +def handle_exceptions(): + def decorator(func): + is_coroutine_function = iscoroutinefunction(func) + + @wraps(func) + async def async_wrapper(*args, **kwargs): + try: + return await func(*args, **kwargs) + except Exception as e: + log.warning("------------------------------------------") + log.warning("Agenta SDK - intercepting exception below:") + log.warning("------------------------------------------") + log.warning(format_exc().strip("\n")) + 
log.warning("------------------------------------------") + raise e + + @wraps(func) + def sync_wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except Exception as e: + log.warning("------------------------------------------") + log.warning("Agenta SDK - intercepting exception below:") + log.warning("------------------------------------------") + log.warning(format_exc().strip("\n")) + log.warning("------------------------------------------") + raise e + + return async_wrapper if is_coroutine_function else sync_wrapper + + return decorator diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/utils/globals.py b/agenta-cli/debugging/simple-app/agenta/sdk/utils/globals.py new file mode 100644 index 0000000000..f05141e089 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/sdk/utils/globals.py @@ -0,0 +1,14 @@ +import agenta + + +def set_global(config=None, tracing=None): + """Allows usage of agenta.config and agenta.tracing in the user's code. + + Args: + config: configuration object to expose as agenta.config. Defaults to None. + tracing: tracing instance to expose as agenta.tracing. Defaults to None.
+ """ + if config is not None: + agenta.config = config + if tracing is not None: + agenta.tracing = tracing diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/utils/helpers.py b/agenta-cli/debugging/simple-app/agenta/sdk/utils/helpers.py new file mode 100644 index 0000000000..da0aac650f --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/sdk/utils/helpers.py @@ -0,0 +1,8 @@ +import importlib.metadata + + +def get_current_version(): + """Returns the current version of Agenta's SDK.""" + + version = importlib.metadata.version("agenta") + return version diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/utils/logging.py b/agenta-cli/debugging/simple-app/agenta/sdk/utils/logging.py new file mode 100644 index 0000000000..cc65d67e7c --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/sdk/utils/logging.py @@ -0,0 +1,21 @@ +import logging +from os import getenv + + +class Logger: + def __init__(self, name="agenta.logger", level=logging.WARNING): + if getenv("AGENTA_DEBUG"): + level = logging.DEBUG + + self.logger = logging.getLogger(name) + self.logger.setLevel(level) + + console_handler = logging.StreamHandler() + self.logger.addHandler(console_handler) + + @property + def log(self) -> logging.Logger: + return self.logger + + +log = Logger().log diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/utils/preinit.py b/agenta-cli/debugging/simple-app/agenta/sdk/utils/preinit.py new file mode 100644 index 0000000000..f039b149f9 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/sdk/utils/preinit.py @@ -0,0 +1,41 @@ +from typing import Any, Optional +from dotenv import load_dotenv + +# from .context import setup_db + +load_dotenv() +# setup_db() + + +class PreInitObject: + """Dummy object that raises an error when a class attribute is accessed before agenta.init() is called.""" + + def __init__(self, name: str, destination: Optional[Any] = None) -> None: + self._name = name + + if destination is not None: + self.__doc__ = destination.__doc__ + + def
__getitem__(self, key: str) -> None: + raise RuntimeError( + f"You must call agenta.init() before accessing {self._name}[{key!r}]" + ) + + def __setitem__(self, key: str, value: Any) -> Any: + raise RuntimeError( + f"You must call agenta.init() before setting {self._name}[{key!r}]" + ) + + def __setattr__(self, key: str, value: Any) -> Any: + if not key.startswith("_"): + raise RuntimeError( + f"You must call agenta.init() before {self._name}[{key!r}]" + ) + else: + return object.__setattr__(self, key, value) + + def __getattr__(self, key: str) -> Any: + if not key.startswith("_"): + raise RuntimeError(f"You must call agenta.init() before {self._name}.{key}") + else: + raise AttributeError diff --git a/agenta-cli/debugging/simple-app/agenta/sdk/utils/singleton.py b/agenta-cli/debugging/simple-app/agenta/sdk/utils/singleton.py new file mode 100644 index 0000000000..5140515331 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/sdk/utils/singleton.py @@ -0,0 +1,13 @@ +from threading import Lock + + +class Singleton(type): + _instances = {} + + _lock: Lock = Lock() + + def __call__(cls, *args, **kwargs): + with cls._lock: + if cls not in cls._instances: + cls._instances[cls] = super().__call__(*args, **kwargs) + return cls._instances[cls] diff --git a/agenta-cli/debugging/simple-app/agenta/templates/compose_email/README.md b/agenta-cli/debugging/simple-app/agenta/templates/compose_email/README.md new file mode 100644 index 0000000000..757455e2ca --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/templates/compose_email/README.md @@ -0,0 +1,9 @@ +# Using this template + +Please make sure to create a `.env` file with your OpenAI API key before running the app. 
+OPENAI_API_KEY=sk-xxxxxxx + +You can find your keys here: +https://platform.openai.com/account/api-keys + +Go back to the [Getting started tutorial](https://docs.agenta.ai/getting-started) to continue \ No newline at end of file diff --git a/agenta-cli/debugging/simple-app/agenta/templates/compose_email/app.py b/agenta-cli/debugging/simple-app/agenta/templates/compose_email/app.py new file mode 100644 index 0000000000..3883a31725 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/templates/compose_email/app.py @@ -0,0 +1,63 @@ +import agenta as ag +from langchain.chains import LLMChain +from langchain.llms import OpenAI +from langchain.prompts import PromptTemplate + +default_prompt = """ +**Write an email** from {from_sender} to {to_receiver} with the designated tone and style: {email_style}. The primary content of the email is: {email_content}. + +Use the following format: +Subject: + + + +**Procedure**: + +**(1) Determine the primary talking points of the email:** +1. Identify the central theme of the provided content. +2. Extract secondary messages or supporting points. + +**(2) Frame sentences for each talking point, keeping in mind the given tone and style {{ style }}:** +3. Create a compelling opening sentence that sets the tone and introduces the main theme. +4. Formulate sentences that add depth or context to each of the previously identified talking points. + +**(3) Draft the initial version of the email:** +Use the sentences crafted in the previous step to compose a coherent and engaging email. Ensure that the flow feels natural and that each sentence transitions smoothly to the next. + +**(4) Analyze the email and list ways to refine it:** +5. Identify areas where the message might be unclear or could benefit from additional information. +6. Consider places where the language or tone might be enhanced to be more persuasive or emotive. +7. Evaluate if the email adheres to the style directive and, if not, identify deviations. 
+ +**(5) Re-write the email by applying the insights gained:** +Rework the initial draft, incorporating the improvements identified in the previous step. Aim to present the message as effectively as possible while strictly adhering to the prescribed tone and style. + +""" + +ag.init() +ag.config.default( + temperature=ag.FloatParam(0.9), prompt_template=ag.TextParam(default_prompt) +) + + +@ag.entrypoint +def generate( + from_sender: str, + to_receiver: str, + email_style: str, + email_content: str, +) -> str: + llm = OpenAI(temperature=ag.config.temperature) + prompt = PromptTemplate( + input_variables=["from_sender", "to_receiver", "email_style", "email_content"], + template=ag.config.prompt_template, + ) + chain = LLMChain(llm=llm, prompt=prompt) + output = chain.run( + from_sender=from_sender, + to_receiver=to_receiver, + email_style=email_style, + email_content=email_content, + ) + + return output diff --git a/agenta-cli/debugging/simple-app/agenta/templates/compose_email/env.example b/agenta-cli/debugging/simple-app/agenta/templates/compose_email/env.example new file mode 100644 index 0000000000..0bd3c56d64 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/templates/compose_email/env.example @@ -0,0 +1,2 @@ +# Rename this file to .env +OPENAI_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxx diff --git a/agenta-cli/debugging/simple-app/agenta/templates/compose_email/requirements.txt b/agenta-cli/debugging/simple-app/agenta/templates/compose_email/requirements.txt new file mode 100644 index 0000000000..2f1e01b99f --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/templates/compose_email/requirements.txt @@ -0,0 +1,3 @@ +langchain +openai +agenta \ No newline at end of file diff --git a/agenta-cli/debugging/simple-app/agenta/templates/compose_email/template.toml b/agenta-cli/debugging/simple-app/agenta/templates/compose_email/template.toml new file mode 100644 index 0000000000..3f5f2b503a --- /dev/null +++ 
b/agenta-cli/debugging/simple-app/agenta/templates/compose_email/template.toml @@ -0,0 +1 @@ +short_desc="Simple app that composes an email." \ No newline at end of file diff --git a/agenta-cli/debugging/simple-app/agenta/templates/extract_data_to_json/README.md b/agenta-cli/debugging/simple-app/agenta/templates/extract_data_to_json/README.md new file mode 100644 index 0000000000..757455e2ca --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/templates/extract_data_to_json/README.md @@ -0,0 +1,9 @@ +# Using this template + +Please make sure to create a `.env` file with your OpenAI API key before running the app. +OPENAI_API_KEY=sk-xxxxxxx + +You can find your keys here: +https://platform.openai.com/account/api-keys + +Go back to the [Getting started tutorial](https://docs.agenta.ai/getting-started) to continue \ No newline at end of file diff --git a/agenta-cli/debugging/simple-app/agenta/templates/extract_data_to_json/app.py b/agenta-cli/debugging/simple-app/agenta/templates/extract_data_to_json/app.py new file mode 100644 index 0000000000..633d59d38c --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/templates/extract_data_to_json/app.py @@ -0,0 +1,53 @@ +import agenta as ag +from openai import OpenAI + +client = OpenAI() +import json + +default_prompt = """You are a world class algorithm for extracting information in structured formats. 
Extract information and create a valid JSON from the following input: {text}""" +function_json_string = """ +{ + "name": "extract_information", + "description": "Extract information from user-provided text", + "parameters": { + "type": "object", + "properties": { + "text": { + "type": "string", + "description": "The text to extract information from" + } + } + } +} +""" + +ag.init() +ag.config.default( + temperature=ag.FloatParam(0.9), + prompt_template=ag.TextParam(default_prompt), + function_json=ag.TextParam(function_json_string), +) + + +@ag.entrypoint +def generate( + text: str, +) -> str: + messages = [ + { + "role": "user", + "content": ag.config.prompt_template.format(text=text), + }, + ] + + function = json.loads(ag.config.function_json) + + response = client.chat.completions.create( + model="gpt-3.5-turbo-0613", + messages=messages, + temperature=ag.config.temperature, + functions=[function], + ) + + output = str(response["choices"][0]["message"]["function_call"]) + return output diff --git a/agenta-cli/debugging/simple-app/agenta/templates/extract_data_to_json/env.example b/agenta-cli/debugging/simple-app/agenta/templates/extract_data_to_json/env.example new file mode 100644 index 0000000000..0bd3c56d64 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/templates/extract_data_to_json/env.example @@ -0,0 +1,2 @@ +# Rename this file to .env +OPENAI_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxx diff --git a/agenta-cli/debugging/simple-app/agenta/templates/extract_data_to_json/requirements.txt b/agenta-cli/debugging/simple-app/agenta/templates/extract_data_to_json/requirements.txt new file mode 100644 index 0000000000..2f1e01b99f --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/templates/extract_data_to_json/requirements.txt @@ -0,0 +1,3 @@ +langchain +openai +agenta \ No newline at end of file diff --git a/agenta-cli/debugging/simple-app/agenta/templates/extract_data_to_json/template.toml 
b/agenta-cli/debugging/simple-app/agenta/templates/extract_data_to_json/template.toml new file mode 100644 index 0000000000..41a613d378 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/templates/extract_data_to_json/template.toml @@ -0,0 +1 @@ +short_desc="Simple app that extracts data to JSON from text" \ No newline at end of file diff --git a/agenta-cli/debugging/simple-app/agenta/templates/simple_prompt/README.md b/agenta-cli/debugging/simple-app/agenta/templates/simple_prompt/README.md new file mode 100644 index 0000000000..757455e2ca --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/templates/simple_prompt/README.md @@ -0,0 +1,9 @@ +# Using this template + +Please make sure to create a `.env` file with your OpenAI API key before running the app. +OPENAI_API_KEY=sk-xxxxxxx + +You can find your keys here: +https://platform.openai.com/account/api-keys + +Go back to the [Getting started tutorial](https://docs.agenta.ai/getting-started) to continue \ No newline at end of file diff --git a/agenta-cli/debugging/simple-app/agenta/templates/simple_prompt/app.py b/agenta-cli/debugging/simple-app/agenta/templates/simple_prompt/app.py new file mode 100644 index 0000000000..8cb809ee55 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/templates/simple_prompt/app.py @@ -0,0 +1,24 @@ +import agenta as ag +from langchain.chains import LLMChain +from langchain.llms import OpenAI +from langchain.prompts import PromptTemplate + +default_prompt = "What is a good name for a company that makes {product}?" 
+ +ag.init() +ag.config.default( + temperature=ag.FloatParam(0.9), + prompt_template=ag.TextParam(default_prompt), +) + + +@ag.entrypoint +def generate(product: str) -> str: + llm = OpenAI(temperature=ag.config.temperature) + prompt = PromptTemplate( + input_variables=["product"], template=ag.config.prompt_template + ) + chain = LLMChain(llm=llm, prompt=prompt) + output = chain.run(product=product) + + return output diff --git a/agenta-cli/debugging/simple-app/agenta/templates/simple_prompt/env.example b/agenta-cli/debugging/simple-app/agenta/templates/simple_prompt/env.example new file mode 100644 index 0000000000..0bd3c56d64 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/templates/simple_prompt/env.example @@ -0,0 +1,2 @@ +# Rename this file to .env +OPENAI_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxx diff --git a/agenta-cli/debugging/simple-app/agenta/templates/simple_prompt/requirements.txt b/agenta-cli/debugging/simple-app/agenta/templates/simple_prompt/requirements.txt new file mode 100644 index 0000000000..2f1e01b99f --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/templates/simple_prompt/requirements.txt @@ -0,0 +1,3 @@ +langchain +openai +agenta \ No newline at end of file diff --git a/agenta-cli/debugging/simple-app/agenta/templates/simple_prompt/template.toml b/agenta-cli/debugging/simple-app/agenta/templates/simple_prompt/template.toml new file mode 100644 index 0000000000..6b1d9b21c1 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/templates/simple_prompt/template.toml @@ -0,0 +1 @@ +short_desc="Simple app that uses one prompt using langchain" \ No newline at end of file diff --git a/agenta-cli/debugging/simple-app/agenta/tests/__init__.py b/agenta-cli/debugging/simple-app/agenta/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/new_baby_name.py 
b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/new_baby_name.py new file mode 100644 index 0000000000..41b4577af6 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/new_baby_name.py @@ -0,0 +1,46 @@ +import agenta as ag +from agenta import FloatParam, TextParam +from openai import OpenAI +from pydantic import BaseModel, Field + +client = OpenAI() +import os + +default_prompt = ( + "Give me 10 names for a baby from this country {country} with gender {gender}!!!!" +) + +ag.init() + + +class BabyConfig(BaseModel): + temperature: float = Field(default=0.2) + prompt_template: str = Field(default=default_prompt) + + +@ag.route("/", config_schema=BabyConfig) +def generate(country: str, gender: str) -> str: + """ + Generate a baby name based on the given country and gender. + + Args: + country (str): The country to generate the name from. + gender (str): The gender of the baby. + + Returns: + str: The generated baby name. 
+ """ + config = ag.ConfigManager.get_from_route(schema=BabyConfig) + prompt = config.prompt_template.format(country=country, gender=gender) + + chat_completion = client.chat.completions.create( + model="gpt-3.5-turbo", + messages=[{"role": "user", "content": prompt}], + temperature=config.temperature, + ) + token_usage = chat_completion.usage.dict() + return { + "message": chat_completion.choices[0].message.content, + **{"usage": token_usage}, + "cost": ag.calculate_token_usage("gpt-3.5-turbo", token_usage), + } diff --git a/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/new_chat_prompt.py b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/new_chat_prompt.py new file mode 100644 index 0000000000..c752f4a713 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/new_chat_prompt.py @@ -0,0 +1,103 @@ +from typing import Annotated + +import agenta as ag +import litellm +from agenta.sdk.assets import supported_llm_models +from pydantic import BaseModel, Field + +litellm.drop_params = True + + +prompts = { + "system_prompt": "You are an expert in geography.", + "user_prompt": """What is the capital of {country}?""", +} + +GPT_FORMAT_RESPONSE = ["gpt-3.5-turbo-1106", "gpt-4-1106-preview"] + + +ag.init(config_fname="config.toml") + + +class MyConfig(BaseModel): + temperature: float = Field(default=1, ge=0.0, le=2.0) + model: Annotated[str, ag.MultipleChoice(choices=supported_llm_models)] = Field( + default="gpt-3.5-turbo" + ) + max_tokens: int = Field(default=-1, ge=-1, le=4000) + prompt_system: str = Field(default=prompts["system_prompt"]) + prompt_user: str = Field(default=prompts["user_prompt"]) + top_p: float = Field(default=1) + frequence_penalty: float = Field(default=0.0, ge=-2.0, le=2.0) + presence_penalty: float = Field(default=0.0, ge=-2.0, le=2.0) + force_json: bool = Field(default=False) + + 
+@ag.instrument(spankind="llm") +async def llm_call(prompt_system: str, prompt_user: str): + config = ag.ConfigManager.get_from_route(schema=MyConfig) + response_format = ( + {"type": "json_object"} + if config.force_json and config.model in GPT_FORMAT_RESPONSE + else {"type": "text"} + ) + + max_tokens = config.max_tokens if config.max_tokens != -1 else None + + # Include frequency_penalty and presence_penalty only if supported + completion_params = {} + if config.model in GPT_FORMAT_RESPONSE: + completion_params["frequency_penalty"] = config.frequence_penalty + completion_params["presence_penalty"] = config.presence_penalty + + response = await litellm.acompletion( + **{ + "model": config.model, + "messages": [ + {"content": prompt_system, "role": "system"}, + {"content": prompt_user, "role": "user"}, + ], + "temperature": config.temperature, + "max_tokens": max_tokens, + "top_p": config.top_p, + "response_format": response_format, + **completion_params, + } + ) + token_usage = response.usage.dict() + return { + "message": response.choices[0].message.content, + "usage": token_usage, + "cost": litellm.cost_calculator.completion_cost( + completion_response=response, model=config.model + ), + } + + +@ag.route("/", config_schema=MyConfig) +@ag.instrument() +async def generate( + inputs: ag.DictInput = ag.DictInput(default_keys=["country"]), +): + config = ag.ConfigManager.get_from_route(schema=MyConfig) + try: + prompt_user = config.prompt_user.format(**inputs) + except Exception as e: + prompt_user = config.prompt_user + try: + prompt_system = config.prompt_system.format(**inputs) + except Exception as e: + prompt_system = config.prompt_system + + # SET MAX TOKENS - via completion() + if config.force_json and config.model not in GPT_FORMAT_RESPONSE: + raise ValueError( + "Model {} does not support JSON response format".format(config.model) + ) + + response = await llm_call(prompt_system=prompt_system, prompt_user=prompt_user) + return { + "message": 
response["message"], + "usage": response.get("usage", None), + "cost": response.get("cost", None), + } diff --git a/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/new_single_prompt.py b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/new_single_prompt.py new file mode 100644 index 0000000000..ea4ad8e45d --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/new_single_prompt.py @@ -0,0 +1,61 @@ +from typing import Annotated, Any, Dict, List + +import agenta as ag +import litellm +from agenta.sdk.assets import supported_llm_models +from pydantic import BaseModel, Field + +litellm.drop_params = True + +SYSTEM_PROMPT = "You have expertise in offering technical ideas to startups." + +ag.init() + + +class MyConfig(BaseModel): + temperature: float = Field(default=0.2, le=1, ge=0) + model: Annotated[str, ag.MultipleChoice(choices=supported_llm_models)] = Field( + default="gpt-3.5-turbo" + ) + max_tokens: int = Field(default=-1, ge=-1, le=4000) + prompt_system: str = Field(default=SYSTEM_PROMPT) + multiselect: Annotated[str, ag.MultipleChoice(choices=["a", "b", "c"])] = Field( + default="a" + ) + + +@ag.route("/llm_call", config_schema=MyConfig) +@ag.instrument(spankind="llm") +async def llm_call(messages: List[Dict[str, Any]], maxtokens): + config = ag.ConfigManager.get_from_route(schema=MyConfig) + chat_completion = await litellm.acompletion( + model=config.model, + messages=messages, + temperature=config.temperature, + max_tokens=maxtokens, + ) + token_usage = chat_completion.usage.dict() + return { + "usage": token_usage, + "message": chat_completion.choices[0].message.content, + "cost": litellm.cost_calculator.completion_cost( + completion_response=chat_completion, model=config.model + ), + } + + +@ag.route("/", config_schema=MyConfig) +@ag.instrument() +async def chat(inputs: ag.MessagesInput = ag.MessagesInput()) -> Dict[str, 
Any]: + config = ag.ConfigManager.get_from_route(schema=MyConfig) + messages = [{"role": "system", "content": config.prompt_system}] + inputs + max_tokens = config.max_tokens if config.max_tokens != -1 else None + response = await llm_call( + messages=messages, + maxtokens=max_tokens, + ) + return { + "message": response["message"], + "usage": response.get("usage", None), + "cost": response.get("cost", None), + } diff --git a/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/requirements.txt b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/requirements.txt new file mode 100644 index 0000000000..6813723fcd --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/requirements.txt @@ -0,0 +1,3 @@ +agenta +openai +litellm \ No newline at end of file diff --git a/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/run_local.py b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/run_local.py new file mode 100644 index 0000000000..94c52aa381 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/run_local.py @@ -0,0 +1,13 @@ +from uvicorn import run +import agenta +import new_chat_prompt # This will register the routes with the FastAPI application +import os + +try: + import ingest +except ImportError: + pass + + +if __name__ == "__main__": + run("agenta:app", host="0.0.0.0", port=80) diff --git a/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/v3.py b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/v3.py new file mode 100644 index 0000000000..5bbbe17ffd --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/v3.py @@ -0,0 +1,53 @@ +import os +from typing import 
Annotated + +import agenta as ag +from agenta.sdk.managers.config import ConfigManager +from pydantic import BaseModel, Field + +os.environ["AGENTA_MODE"] = "true" + +default_prompt = ( + "Give me 10 names for a baby from this country {country} with gender {gender}!!!!" +) + +ag.init() + + +class NestConfig(BaseModel): + some_param: str = "hello" + + +class MyConfigSchema(BaseModel): # <- the app + prompt_1: ag.Prompt = ag.Prompt(prompt_system="hello") + prompt_2: ag.Prompt = ag.Prompt(prompt_system="hello") + nest_config: NestConfig = NestConfig() + + +@ag.route( + path="/", config_schema=MyConfigSchema, is_active=os.environ.get("AGENTA_MODE") +) +def rag(country: str, gender: str) -> str: + """ + Generate a baby name based on the given country and gender. + + Args: + country (str): The country to generate the name from. + gender (str): The gender of the baby. + + Returns: + str: The generated baby name.` + """ + if os.environ.get("AGENTA_MODE") == "true": + config = ConfigManager.get_from_route(schema=MyConfigSchema) + else: + config = ConfigManager.get_from_registry( + schema=MyConfigSchema, environment="production" + ) + prompt = config.pro.format(country=country, gender=gender) + + return f"mock output for {prompt}" + + +if __name__ == "__main__": + rag(country="USA", gender="male") diff --git a/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/baby_name.py b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/baby_name.py new file mode 100644 index 0000000000..41dd9937f5 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/baby_name.py @@ -0,0 +1,39 @@ +import agenta as ag +from agenta import FloatParam, TextParam +from openai import OpenAI + +client = OpenAI() + +default_prompt = ( + "Give me 10 names for a baby from this country {country} with gender {gender}!!!!" 
+) + +ag.init(config_fname="config.toml") +ag.config.default( + temperature=FloatParam(0.2), prompt_template=TextParam(default_prompt) +) + + +@ag.entrypoint +def generate(country: str, gender: str) -> str: + """ + Generate a baby name based on the given country and gender. + + Args: + country (str): The country to generate the name from. + gender (str): The gender of the baby. + + Returns: + str: The generated baby name. + """ + prompt = ag.config.prompt_template.format(country=country, gender=gender) + + chat_completion = client.chat.completions.create( + model="gpt-3.5-turbo", messages=[{"role": "user", "content": prompt}] + ) + token_usage = chat_completion.usage.dict() + return { + "message": chat_completion.choices[0].message.content, + **{"usage": token_usage}, + "cost": ag.calculate_token_usage("gpt-3.5-turbo", token_usage), + } diff --git a/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/chat_prompt.py b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/chat_prompt.py new file mode 100644 index 0000000000..e6a0826c93 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/chat_prompt.py @@ -0,0 +1,96 @@ +import agenta as ag +import litellm +from agenta.sdk.assets import supported_llm_models + +litellm.drop_params = True + + +prompts = { + "system_prompt": "You are an expert in geography.", + "user_prompt": """What is the capital of {country}?""", +} + +GPT_FORMAT_RESPONSE = ["gpt-3.5-turbo-1106", "gpt-4-1106-preview"] + + +ag.init() +ag.config.default( + temperature=ag.FloatParam(default=1, minval=0.0, maxval=2.0), + model=ag.GroupedMultipleChoiceParam( + default="gpt-3.5-turbo", choices=supported_llm_models + ), + max_tokens=ag.IntParam(-1, -1, 4000), + prompt_system=ag.TextParam(prompts["system_prompt"]), + prompt_user=ag.TextParam(prompts["user_prompt"]), + top_p=ag.FloatParam(1), + 
frequence_penalty=ag.FloatParam(default=0.0, minval=-2.0, maxval=2.0), + presence_penalty=ag.FloatParam(default=0.0, minval=-2.0, maxval=2.0), + force_json=ag.BinaryParam(False), +) + + +@ag.instrument(spankind="llm") +async def llm_call(prompt_system: str, prompt_user: str): + response_format = ( + {"type": "json_object"} + if ag.config.force_json and ag.config.model in GPT_FORMAT_RESPONSE + else {"type": "text"} + ) + max_tokens = ag.config.max_tokens if ag.config.max_tokens != -1 else None + + # Include frequency_penalty and presence_penalty only if supported + completion_params = {} + if ag.config.model in GPT_FORMAT_RESPONSE: + completion_params["frequency_penalty"] = ag.config.frequence_penalty + completion_params["presence_penalty"] = ag.config.presence_penalty + + response = await litellm.acompletion( + **{ + "model": ag.config.model, + "messages": [ + {"content": prompt_system, "role": "system"}, + {"content": prompt_user, "role": "user"}, + ], + "temperature": ag.config.temperature, + "max_tokens": max_tokens, + "top_p": ag.config.top_p, + "response_format": response_format, + **completion_params, + } + ) + token_usage = response.usage.dict() + return { + "message": response.choices[0].message.content, + "usage": token_usage, + "cost": litellm.cost_calculator.completion_cost( + completion_response=response, model=ag.config.model + ), + } + + +@ag.entrypoint +@ag.instrument() +async def generate( + inputs: ag.DictInput = ag.DictInput(default_keys=["country"]), +): + try: + prompt_user = ag.config.prompt_user.format(**inputs) + except Exception as e: + prompt_user = ag.config.prompt_user + try: + prompt_system = ag.config.prompt_system.format(**inputs) + except Exception as e: + prompt_system = ag.config.prompt_system + + # SET MAX TOKENS - via completion() + if ag.config.force_json and ag.config.model not in GPT_FORMAT_RESPONSE: + raise ValueError( + "Model {} does not support JSON response format".format(ag.config.model) + ) + + response = await 
llm_call(prompt_system=prompt_system, prompt_user=prompt_user) + return { + "message": response["message"], + "usage": response.get("usage", None), + "cost": response.get("cost", None), + } diff --git a/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/requirements.txt b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/requirements.txt new file mode 100644 index 0000000000..6813723fcd --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/requirements.txt @@ -0,0 +1,3 @@ +agenta +openai +litellm \ No newline at end of file diff --git a/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/run_local.py b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/run_local.py new file mode 100644 index 0000000000..65c782f3af --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/run_local.py @@ -0,0 +1,13 @@ +from uvicorn import run +import agenta +import baby_name # This will register the routes with the FastAPI application +import os + +try: + import ingest +except ImportError: + pass + + +if __name__ == "__main__": + run("agenta:app", host="0.0.0.0", port=80) diff --git a/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/single_prompt.py b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/single_prompt.py new file mode 100644 index 0000000000..02dd81cb6a --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/single_prompt.py @@ -0,0 +1,53 @@ +from typing import Dict, Any, List +import agenta as ag +import litellm +from agenta.sdk.assets import supported_llm_models + +litellm.drop_params = True + +SYSTEM_PROMPT = "You have expertise in offering technical ideas to 
startups." + +ag.init() +ag.config.default( + temperature=ag.FloatParam(0.2), + model=ag.GroupedMultipleChoiceParam( + default="gpt-3.5-turbo", choices=supported_llm_models + ), + max_tokens=ag.IntParam(-1, -1, 4000), + prompt_system=ag.TextParam(SYSTEM_PROMPT), + multiselect=ag.MultipleChoiceParam(choices=["a", "b", "c"]), +) + + +@ag.instrument(spankind="llm") +async def llm_call(messages: List[Dict[str, Any]], max_tokens: int): + chat_completion = await litellm.acompletion( + model=ag.config.model, + messages=messages, + temperature=ag.config.temperature, + max_tokens=max_tokens, + ) + token_usage = chat_completion.usage.dict() + return { + "usage": token_usage, + "message": chat_completion.choices[0].message.content, + "cost": litellm.cost_calculator.completion_cost( + completion_response=chat_completion, model=ag.config.model + ), + } + + +@ag.entrypoint +@ag.instrument() +async def chat(inputs: ag.MessagesInput = ag.MessagesInput()) -> Dict[str, Any]: + messages = [{"role": "system", "content": ag.config.prompt_system}] + inputs + max_tokens = ag.config.max_tokens if ag.config.max_tokens != -1 else None + response = await llm_call( + messages=messages, + max_tokens=max_tokens, + ) + return { + "message": response["message"], + "usage": response.get("usage", None), + "cost": response.get("cost", None), + } diff --git a/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/run_local.py b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/run_local.py new file mode 100644 index 0000000000..75266e1443 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/run_local.py @@ -0,0 +1,13 @@ +from uvicorn import run +import agenta +import v3 # This will register the routes with the FastAPI application +import os + +try: + import ingest +except ImportError: + pass + + +if __name__ == "__main__": + run("agenta:app", host="0.0.0.0", port=80) diff --git a/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/v1.py 
b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/v1.py new file mode 100644 index 0000000000..e7b4018201 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/v1.py @@ -0,0 +1,61 @@ +from enum import Enum +from typing import List, Union, Dict +from dataclasses import dataclass +import pdb +import agenta as ag +from agenta import FloatParam, TextParam +from pydantic import BaseModel, Field +from typing import Annotated +from typing import Literal + +default_prompt = ( + "Give me 10 names for a baby from this country {country} with gender {gender}!!!!" +) + +ag.init(config_fname="config.toml") + +# To add to our types + + +class MyConfig(BaseModel): + prompt_template: str = Field(default=default_prompt) + bool_param: bool = Field(default=True) + int_param: int = Field(default=1, ge=1, le=5) + float_param: float = Field(default=1.0, gt=0, lt=10) + multiple: Annotated[str, ag.MultipleChoice(["gpt-3", "gpt-5"])] = Field( + default="gpt3" + ) + # multiple: Literal["gpt-3", "gpt-5"] = Field(default="gpt-3") + grouped_multiple: Annotated[ + str, + ag.MultipleChoice({"openai": ["gpt-3", "gpt-5"], "azure": ["gpt-5", "gpt-3"]}), + ] = Field(default="gpt3") + + +config = MyConfig(prompt_template=default_prompt) + + +@ag.instrument() +def retriever(query: str) -> str: + return "mock output for " + query + + +@ag.entrypoint(config) +def generate(country: str, gender: str) -> str: + """ + Generate a baby name based on the given country and gender. + + Args: + country (str): The country to generate the name from. + gender (str): The gender of the baby. 
+ + Returns: + str: The generated baby name.` + """ + prompt = config.prompt_template.format(country=country, gender=gender) + + return { + "message": f"mock output for {prompt}", + **{"usage": {"prompt_tokens": 10, "completion_tokens": 10, "total_tokens": 20}}, + "cost": 0.01, + } diff --git a/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/v2.py b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/v2.py new file mode 100644 index 0000000000..718cfbe42c --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/v2.py @@ -0,0 +1,62 @@ +from enum import Enum +from typing import List, Union, Dict +from dataclasses import dataclass +import pdb +import agenta as ag +from agenta import FloatParam, TextParam +from pydantic import BaseModel, Field +from typing import Annotated +from typing import Literal + +# AGENTA_MODE = TRUE +default_prompt = ( + "Give me 10 names for a baby from this country {country} with gender {gender}!!!!" +) + +ag.init(config_fname="config.toml") + +# To add to our types +# Option 1 + + +class MyConfigSchema(BaseModel): # <- the app + prompt_template: str = Field(default=default_prompt) + bool_param: bool = Field(default=True) + int_param: int = Field(default=1, ge=1, le=5) + float_param: float = Field(default=1.0, gt=0, lt=10) + multiple: Annotated[str, ag.MultipleChoice(["gpt-3", "gpt-5"])] = Field( + default="gpt3" + ) + # multiple: Literal["gpt-3", "gpt-5"] = Field(default="gpt-3") + grouped_multiple: Annotated[ + str, + ag.MultipleChoice({"openai": ["gpt-3", "gpt-5"], "azure": ["gpt-5", "gpt-3"]}), + ] = Field(default="gpt3") + + class Settings: + app_name: str = "myapp" + + +config = MyConfigSchema() + + +@ag.route(path="/", config=config) # side effects +def generate(country: str, gender: str) -> str: + """ + Generate a baby name based on the given country and gender. + + Args: + country (str): The country to generate the name from. + gender (str): The gender of the baby. 
+ + Returns: + str: The generated baby name.` + """ + + prompt = config.prompt_template.format(country=country, gender=gender) + + return { + "message": f"mock output for {prompt}", + **{"usage": {"prompt_tokens": 10, "completion_tokens": 10, "total_tokens": 20}}, + "cost": 0.01, + } diff --git a/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/v4_prompt.py b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/v4_prompt.py new file mode 100644 index 0000000000..08b581f8ab --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/tests/management_sdk/v4_prompt.py @@ -0,0 +1,80 @@ +from enum import Enum +from typing import List, Union, Dict +from dataclasses import dataclass +import pdb +import agenta as ag +from agenta.sdk.managers.config import ConfigManager +from agenta import FloatParam, TextParam +from pydantic import BaseModel, Field +from typing import Annotated +from typing import Literal + + +# AGENTA_MODE = TRUE +default_prompt = ( + "Give me 10 names for a baby from this country {country} with gender {gender}!!!!" 
+) + +ag.init(config_fname="config.toml") + +# To add to our types +# Option 1 + + +# class MyConfigSchema(BaseModel): # <- the app +# prompt_template: str = Field(default=default_prompt) +# bool_param: bool = Field(default=True) +# int_param: int = Field(default=1, ge=1, le=5) +# float_param: float = Field(default=1.0, gt=0, lt=10) +# multiple: Annotated[str, ag.MultipleChoice(["gpt-3", "gpt-5"])] = Field(default="gpt3") +# # multiple: Literal["gpt-3", "gpt-5"] = Field(default="gpt-3") +# grouped_multiple: Annotated[str, ag.MultipleChoice({"openai": ["gpt-3", "gpt-5"], "azure": ["gpt-5", "gpt-3"]})] = Field(default="gpt3") + +# class Settings: +# app_name: str = 'myapp' + + +class Prompt(BaseModel): + prompt_template: str = Field(default=default_prompt) + bool_param: bool = Field(default=True) + int_param: int = Field(default=1, ge=1, le=5) + float_param: float = Field(default=1.0, gt=0, lt=10) + grouped_multiple: Annotated[ + str, + ag.MultipleChoice({"openai": ["gpt-3", "gpt-5"], "azure": ["gpt-5", "gpt-3"]}), + ] = Field(default="gpt3") + + +class MyConfigSchema(BaseModel): # <- the app + prompt: Prompt = Field(default=Prompt()) + + class settings: + app_name: str = "myapp" + + +@ag.route(path="/", config_schema=MyConfigSchema) +def rag(country: str, gender: str) -> str: + """ + Generate a baby name based on the given country and gender. + + Args: + country (str): The country to generate the name from. + gender (str): The gender of the baby. 
+ + Returns: + str: The generated baby name.` + """ + # if os.environ.get("AGENTA_CLOUD"): + # config = ag.ConfigLoader.from_route(MyConfigSchema) + # config = ag.ConfigLoader.from_backend(MyConfigSchema) + # config = ag.ConfigLoader.from_file(MyConfigSchema) + # config = ConfigManager.from_route(MyConfigSchema) + config = MyConfigSchema() + # config = ConfigManager.from_backend(MyConfigSchema) + prompt = config.prompt.prompt_template.format(country=country, gender=gender) + + return { + "message": f"mock output for {prompt}", + **{"usage": {"prompt_tokens": 10, "completion_tokens": 10, "total_tokens": 20}}, + "cost": 0.01, + } diff --git a/agenta-cli/debugging/simple-app/agenta/tests/prompt_sdk/conftest.py b/agenta-cli/debugging/simple-app/agenta/tests/prompt_sdk/conftest.py new file mode 100644 index 0000000000..acfc4d80ef --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/tests/prompt_sdk/conftest.py @@ -0,0 +1,40 @@ +from typing import List, Dict, Any + +import pytest +from pydantic import BaseModel + + +class Prompt(BaseModel): + temperature: float + model: str + max_tokens: int + messages: List[Dict[str, Any]] + top_p: float + frequency_penalty: float + presence_penalty: float + + +class Parameters(BaseModel): + temperature: float + model: str + max_tokens: int + + +@pytest.fixture +def prompt(): + # Sample Prompt object to use in tests + return Prompt( + temperature=0.6, + model="gpt-3.5-turbo", + max_tokens=150, + messages=[ + { + "role": "system", + "content": "You are an assistant that provides concise answers", + }, + {"role": "user", "content": "Explain {topic} in simple terms"}, + ], + top_p=1.0, + frequency_penalty=0.0, + presence_penalty=0.0, + ) diff --git a/agenta-cli/debugging/simple-app/agenta/tests/prompt_sdk/test_client.py b/agenta-cli/debugging/simple-app/agenta/tests/prompt_sdk/test_client.py new file mode 100644 index 0000000000..378fe2e7a0 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/tests/prompt_sdk/test_client.py @@ 
-0,0 +1,73 @@ +from unittest.mock import patch + +import pytest +from agenta.sdk.client import Agenta + + +@pytest.fixture +def agenta_client(): + # Set up the Agenta client with a mock API key + with patch.dict( + "os.environ", + {"AGENTA_API_KEY": "mock_api_key", "AGENTA_HOST": "https://mock.agenta.ai"}, + ): + client = Agenta() + return client + + +def test_get_config_with_caching(agenta_client): + """ + Test the caching mechanism of the get_config method to ensure it returns cached data. + + Args: + agenta_client: The fixture providing an instance of the Agenta client. + """ + # Setup the mock to return a predefined configuration + with patch.object( + agenta_client.client.configs, + "get_config", + return_value={"parameters": "something"}, + ) as mock_get_config: + # Retrieve configuration to store in cache + response = agenta_client.get_config("base123", "production") + assert response == { + "parameters": "something" + }, "First response should match the mock data." + + # Modify the return value of the mock + mock_get_config.return_value = {"parameters": "something else"} + + # Attempt to retrieve configuration again, expecting cached data + response = agenta_client.get_config("base123", "production") + assert response == { + "parameters": "something" + }, "Second response should return cached data, not new mock data." + + +def test_get_config_without_caching(agenta_client): + """ + Test the get_config method without caching to ensure it always fetches new data. + + Args: + agenta_client: The fixture providing an instance of the Agenta client. 
+ """ + # Setup the mock to return a predefined configuration + with patch.object( + agenta_client.client.configs, + "get_config", + return_value={"parameters": "something"}, + ) as mock_get_config: + # Retrieve configuration with caching disabled + response = agenta_client.get_config("base123", "production", cache_timeout=0) + assert response == { + "parameters": "something" + }, "First response should match the mock data." + + # Modify the return value of the mock + mock_get_config.return_value = {"parameters": "something else"} + + # Retrieve new configuration with caching disabled + response = agenta_client.get_config("base123", "production", cache_timeout=0) + assert response == { + "parameters": "something else" + }, "Second response should match the new mock data." diff --git a/agenta-cli/debugging/simple-app/agenta/tests/prompt_sdk/test_config_manager.py b/agenta-cli/debugging/simple-app/agenta/tests/prompt_sdk/test_config_manager.py new file mode 100644 index 0000000000..7a04ae2620 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/tests/prompt_sdk/test_config_manager.py @@ -0,0 +1,182 @@ +import json +from typing import Annotated +from unittest.mock import patch + +import yaml # type: ignore +import pytest +from pydantic import BaseModel, Field + +import agenta as ag +from agenta.tests.prompt_sdk.conftest import Parameters +from agenta.sdk.managers.config import ConfigManager + + +# AGENTA_MODE = TRUE +default_prompt = ( + "Give me 10 names for a baby from this country {country} with gender {gender}!!!!" 
+) + + +# To add to our types +# Option 1 + + +class MyConfigSchema(BaseModel): # <- the app + prompt_template: str = Field(default=default_prompt) + bool_param: bool = Field(default=True) + int_param: int = Field(default=1, ge=1, le=5) + float_param: float = Field(default=1.0, gt=0, lt=10) + multiple: Annotated[str, ag.MultipleChoice(["gpt-3", "gpt-5"])] = Field( + default="gpt3" + ) + # multiple: Literal["gpt-3", "gpt-5"] = Field(default="gpt-3") + grouped_multiple: Annotated[ + str, + ag.MultipleChoice({"openai": ["gpt-3", "gpt-5"], "azure": ["gpt-5", "gpt-3"]}), + ] = Field(default="gpt3") + + class Settings: + app_name: str = "myapp" + + +@pytest.fixture +def sample_config(): + return { + "prompt_template": "Custom prompt: {country} {gender}", + "bool_param": False, + "int_param": 3, + "float_param": 5.5, + "multiple": "gpt-5", + "grouped_multiple": "gpt-5", + } + + +@pytest.fixture +def yaml_config_file(tmp_path, sample_config): + file_path = tmp_path / "test_config.yaml" + with open(file_path, "w") as f: + yaml.dump(sample_config, f) + return file_path + + +@pytest.fixture +def json_config_file(tmp_path, sample_config): + file_path = tmp_path / "test_config.json" + with open(file_path, "w") as f: + json.dump(sample_config, f) + return file_path + + +def test_get_from_yaml(yaml_config_file): + config = ConfigManager.get_from_yaml(str(yaml_config_file), MyConfigSchema) + assert isinstance(config, MyConfigSchema) + assert config.prompt_template == "Custom prompt: {country} {gender}" + assert config.bool_param is False + assert config.int_param == 3 + assert config.float_param == 5.5 + assert config.multiple == "gpt-5" + assert config.grouped_multiple == "gpt-5" + + +def test_get_from_json(json_config_file): + config = ConfigManager.get_from_json(str(json_config_file), MyConfigSchema) + assert isinstance(config, MyConfigSchema) + assert config.prompt_template == "Custom prompt: {country} {gender}" + assert config.bool_param is False + assert config.int_param == 
3 + assert config.float_param == 5.5 + assert config.multiple == "gpt-5" + assert config.grouped_multiple == "gpt-5" + + +def test_get_from_yaml_file_not_found(): + with pytest.raises(FileNotFoundError): + ConfigManager.get_from_yaml("non_existent_file.yaml", MyConfigSchema) + + +def test_get_from_json_file_not_found(): + with pytest.raises(FileNotFoundError): + ConfigManager.get_from_json("non_existent_file.json", MyConfigSchema) + + +@patch("agenta.ConfigManager.get_from_registry") +def test_fetch_configuration_and_return_dict(mock_get_config): + # Mock the API response for fetching configuration + + mock_get_config.return_value = { + "temperature": 0.9, + "model": "gpt-3.5-turbo", + "max_tokens": 100, + } + + config = ConfigManager.get_from_registry( + app_slug="my-app", variant_slug="new-variant", variant_version=2 + ) + + assert isinstance(config, dict) + assert config["temperature"] == 0.9 + assert config["model"] == "gpt-3.5-turbo" + assert config["max_tokens"] == 100 + + +@patch("agenta.ConfigManager.get_from_registry") +def test_fetch_configuration_and_return_schema(mock_get_config): + # Mock the API response for fetching configuration + + mock_get_config.return_value = Parameters( + temperature=0.9, model="gpt-3.5-turbo", max_tokens=100 + ) + + config_as_schema = ConfigManager.get_from_registry( + schema=Parameters, + app_slug="my-app", + variant_slug="new-variant", + variant_version=2, + ) + + assert isinstance(config_as_schema, Parameters) + assert config_as_schema.temperature == 0.9 + assert config_as_schema.model == "gpt-3.5-turbo" + assert config_as_schema.max_tokens == 100 + + +@pytest.mark.asyncio +@patch("agenta.ConfigManager.aget_from_registry") +async def test_afetch_configuration_and_return_dict(mock_aget_config): + # Mock the API response for fetching configuration + + mock_aget_config.return_value = { + "temperature": 0.9, + "model": "gpt-3.5-turbo", + "max_tokens": 100, + } + + config = await ConfigManager.aget_from_registry( + 
app_slug="my-app", variant_slug="new-variant", variant_version=2 + ) + + assert config["temperature"] == 0.9 + assert config["model"] == "gpt-3.5-turbo" + assert config["max_tokens"] == 100 + + +@pytest.mark.asyncio +@patch("agenta.ConfigManager.aget_from_registry") +async def test_afetch_configuration_and_return_schema(mock_aget_config): + # Mock the API response for fetching configuration + + mock_aget_config.return_value = Parameters( + temperature=0.9, model="gpt-3.5-turbo", max_tokens=100 + ) + + config_as_schema = await ConfigManager.aget_from_registry( + schema=Parameters, + app_slug="my-app", + variant_slug="new-variant", + variant_version=2, + ) + + assert isinstance(config_as_schema, Parameters) + assert config_as_schema.temperature == 0.9 + assert config_as_schema.model == "gpt-3.5-turbo" + assert config_as_schema.max_tokens == 100 diff --git a/agenta-cli/debugging/simple-app/agenta/tests/prompt_sdk/test_deployment_manager.py b/agenta-cli/debugging/simple-app/agenta/tests/prompt_sdk/test_deployment_manager.py new file mode 100644 index 0000000000..2edd37bf83 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/tests/prompt_sdk/test_deployment_manager.py @@ -0,0 +1,88 @@ +from unittest.mock import patch + +import pytest + +from agenta.sdk.managers import DeploymentManager +from agenta.sdk.managers.shared import DeploymentResponse + + +@patch("agenta.DeploymentManager.deploy") +def test_deploy_variant(mock_deploy): + # Mock the API response for deploying a variant + mock_deploy.return_value = DeploymentResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-variant", + "variant_version": 2, + "environment_slug": "staging", + "deployed_at": "2023-10-02T12:30:00Z", + "deployed_by": "user@example.com", + } + ) + + deployment = DeploymentManager.deploy( + app_slug="my-app", + variant_slug="new-variant", + environment_slug="staging", + variant_version=None, + ) + + assert deployment.environment_slug == "staging" + assert deployment.deployed_by == 
"user@example.com" + + +@pytest.mark.asyncio +@patch("agenta.DeploymentManager.adeploy") +async def test_adeploy_variant(mock_adeploy): + # Mock the API response for deploying a variant + mock_adeploy.return_value = DeploymentResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-variant", + "variant_version": 8, + "environment_slug": "production", + "deployed_at": "2023-10-02T12:30:00Z", + "deployed_by": "abc@example.com", + } + ) + + deployment = await DeploymentManager.adeploy( + app_slug="my-app", + variant_slug="new-variant", + environment_slug="staging", + variant_version=None, + ) + + assert deployment.environment_slug == "production" + assert deployment.deployed_by == "abc@example.com" + + +@patch("agenta.DeploymentManager.deploy") +def test_deploy_variant_not_found(mock_deploy): + # Mock the API response for deploying a variant + mock_deploy.return_value = {"detail": "Config not found."} + + deployment = DeploymentManager.deploy( + app_slug="non-existent-app", + variant_slug="new-variant", + environment_slug="staging", + variant_version=None, + ) + + assert deployment["detail"] == "Config not found." # type: ignore + + +@pytest.mark.asyncio +@patch("agenta.DeploymentManager.adeploy") +async def test_adeploy_variant_not_found(mock_adeploy): + # Mock the API response for deploying a variant + mock_adeploy.return_value = {"detail": "Config not found."} + + deployment = await DeploymentManager.adeploy( + app_slug="non-existent-app", + variant_slug="new-variant", + environment_slug="staging", + variant_version=None, + ) + + assert deployment["detail"] == "Config not found." 
# type: ignore diff --git a/agenta-cli/debugging/simple-app/agenta/tests/prompt_sdk/test_variant_manager.py b/agenta-cli/debugging/simple-app/agenta/tests/prompt_sdk/test_variant_manager.py new file mode 100644 index 0000000000..1c633534f3 --- /dev/null +++ b/agenta-cli/debugging/simple-app/agenta/tests/prompt_sdk/test_variant_manager.py @@ -0,0 +1,313 @@ +from unittest.mock import patch + +import pytest + +from agenta.sdk.managers import VariantManager +from agenta.sdk.managers.shared import ConfigurationResponse + + +@patch("agenta.VariantManager.create") +def test_variant_create(mock_create, prompt): + # Mock the API response for creating a variant + mock_create.return_value = ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-variant", + "variant_version": 1, + "params": prompt.model_dump(), + } + ) + + variant = VariantManager.create( + parameters=prompt.model_dump(), + variant_slug="new-variant", + app_slug="my-app", + ) + + assert variant.app_slug == "my-app" + assert variant.variant_slug == "new-variant" + assert variant.variant_version == 1 + + +@pytest.mark.asyncio +@patch("agenta.VariantManager.acreate") +async def test_variant_acreate(mock_acreate, prompt): + # Mock the API response for creating a variant + mock_acreate.return_value = ConfigurationResponse( + **{ + "app_slug": "qa-assistant", + "variant_slug": "school-assistant", + "variant_version": 1, + "params": prompt.model_dump(), + } + ) + + variant = await VariantManager.acreate( + parameters=prompt.model_dump(), + variant_slug="school-assistant", + app_slug="qa-assistant", + ) + + assert variant.app_slug == "qa-assistant" + assert variant.variant_slug == "school-assistant" + assert variant.variant_version == 1 + + +@patch("agenta.VariantManager.commit") +def test_variant_commit(mock_commit, prompt): + # Mock the API response for committing a variant + mock_commit.return_value = ConfigurationResponse( + **{ + "app_slug": "my-new-app", + "variant_slug": "new-new-variant", 
+ "variant_version": 2, + "params": prompt.model_dump(), + } + ) + + variant = VariantManager.commit( + parameters=prompt.model_dump(), + variant_slug="new-variant", + app_slug="my-app", + ) + + assert variant.variant_version == 2 + assert type(variant.params) == dict + assert variant.params["temperature"] == 0.6 + + +@pytest.mark.asyncio +@patch("agenta.VariantManager.acommit") +async def test_variant_acommit(mock_acommit, prompt): + # Mock the API response for committing a variant + mock_acommit.return_value = ConfigurationResponse( + **{ + "app_slug": "my-new-app", + "variant_slug": "new-variant", + "variant_version": 4, + "params": {**prompt.model_dump(), "temperature": 1.0}, + } + ) + + variant = await VariantManager.acommit( + parameters=prompt.model_dump(), + variant_slug="new-variant", + app_slug="my-new-app", + ) + + assert variant.variant_version == 4 + assert type(variant.params) == dict + assert variant.params["temperature"] == 1.0 + + +@patch("agenta.VariantManager.delete") +def test_variant_delete(mock_delete): + # Mock the API response for deleting a variant + mock_delete.return_value = 204 + + result = VariantManager.delete( + variant_slug="obsolete-variant", + app_slug="my-app", + ) + + assert result == 204 + + +@pytest.mark.asyncio +@patch("agenta.VariantManager.adelete") +async def test_variant_adelete(mock_adelete): + # Mock the API response for deleting a variant + mock_adelete.return_value = 204 + + result = await VariantManager.adelete( + variant_slug="obsolete-variant-2", + app_slug="my-app", + ) + + assert result == 204 + + +@patch("agenta.VariantManager.list") +def test_variant_list(mock_list, prompt): + # Mock the API response for listing variants + mock_list.return_value = [ + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 0, + "params": {**prompt.model_dump(), "temperature": 0.2}, + } + ), + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": 
"new-app-variant", + "variant_version": 1, + "params": {**prompt.model_dump(), "temperature": 0.56}, + } + ), + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 2, + "params": {**prompt.model_dump(), "temperature": 1.0}, + } + ), + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 3, + "params": {**prompt.model_dump(), "temperature": 0.85}, + } + ), + ] + + variants = VariantManager.list(app_slug="my-app") + + assert len(variants) == 4 + assert variants[0].variant_slug == "new-app-variant" + assert variants[1].variant_version == 1 + + +@pytest.mark.asyncio +@patch("agenta.VariantManager.alist") +async def test_variant_alist(mock_alist, prompt): + # Mock the API response for listing variants + mock_alist.return_value = [ + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 0, + "params": {**prompt.model_dump(), "temperature": 0.2}, + } + ), + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 1, + "params": {**prompt.model_dump(), "temperature": 0.56}, + } + ), + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 2, + "params": {**prompt.model_dump(), "temperature": 1.0}, + } + ), + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 3, + "params": {**prompt.model_dump(), "temperature": 0.85}, + } + ), + ] + + variants = await VariantManager.alist(app_slug="my-app") + + assert len(variants) == 4 + assert variants[0].variant_slug == "new-app-variant" + assert variants[1].variant_version == 1 + + +@patch("agenta.VariantManager.history") +def test_variant_history(mock_history, prompt): + # Mock the API response for listing variant history + mock_history.return_value = [ + ConfigurationResponse( + **{ + "app_slug": 
"my-app", + "variant_slug": "new-app-variant", + "variant_version": 0, + "params": {**prompt.model_dump(), "temperature": 0.2}, + } + ), + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 1, + "params": {**prompt.model_dump(), "temperature": 0.56}, + } + ), + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 2, + "params": {**prompt.model_dump(), "temperature": 1.0}, + } + ), + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 3, + "params": {**prompt.model_dump(), "temperature": 0.85}, + } + ), + ] + + variants = VariantManager.history( + variant_slug="new-app-variant", + app_id="06056815-c9d0-4cdb-bcc7-7c9e6a3fe5e3", + ) + + assert len(variants) == 4 + assert variants[0].variant_slug == "new-app-variant" + assert variants[1].variant_version == 1 + + +@pytest.mark.asyncio +@patch("agenta.VariantManager.ahistory") +async def test_variant_ahistory(mock_ahistory, prompt): + # Mock the API response for listing variants + mock_ahistory.return_value = [ + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 0, + "params": {**prompt.model_dump(), "temperature": 0.2}, + } + ), + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 1, + "params": {**prompt.model_dump(), "temperature": 0.56}, + } + ), + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 2, + "params": {**prompt.model_dump(), "temperature": 1.0}, + } + ), + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 3, + "params": {**prompt.model_dump(), "temperature": 0.85}, + } + ), + ] + + variants = await VariantManager.ahistory( + variant_slug="new-app-variant", 
 app_id="06056815-c9d0-4cdb-bcc7-7c9e6a3fe5e3" + ) + assert len(variants) == 4 + assert variants[0].variant_slug == "new-app-variant" + assert variants[1].variant_version == 1 diff --git a/agenta-cli/debugging/simple-app/config.toml b/agenta-cli/debugging/simple-app/config.toml new file mode 100644 index 0000000000..389b22a2bf --- /dev/null +++ b/agenta-cli/debugging/simple-app/config.toml @@ -0,0 +1,6 @@ +app_name = "asdf" +app_id = "0193bbaa-4f2b-7510-9170-9bdf95249ca0" +backend_host = "https://cloud.agenta.ai" +api_key = "<REDACTED-SECRET-ROTATE-THIS-KEY>" +variants = [] +variant_ids = [] diff --git a/agenta-cli/debugging/simple-app/docker.tar.gz b/agenta-cli/debugging/simple-app/docker.tar.gz new file mode 100644 index 0000000000..9b6be9ad66 Binary files /dev/null and b/agenta-cli/debugging/simple-app/docker.tar.gz differ diff --git a/agenta-cli/debugging/simple-app/entrypoint.sh b/agenta-cli/debugging/simple-app/entrypoint.sh new file mode 100755 index 0000000000..3c6b353144 --- /dev/null +++ b/agenta-cli/debugging/simple-app/entrypoint.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env +fi + +exec python main.py diff --git a/agenta-cli/debugging/simple-app/lambda_function.py b/agenta-cli/debugging/simple-app/lambda_function.py new file mode 100644 index 0000000000..ca186d6e82 --- /dev/null +++ b/agenta-cli/debugging/simple-app/lambda_function.py @@ -0,0 +1,6 @@ +import agenta +import _app +from mangum import Mangum + + +handler = Mangum(agenta.app) diff --git a/agenta-cli/debugging/simple-app/main.py b/agenta-cli/debugging/simple-app/main.py new file mode 100644 index 0000000000..a623c787e0 --- /dev/null +++ b/agenta-cli/debugging/simple-app/main.py @@ -0,0 +1,13 @@ +from uvicorn import run +import agenta +import _app # This will register the routes with the FastAPI application +import os + +try: + import ingest +except ImportError: + pass + + +if __name__ == "__main__": + run("agenta:app", 
host="0.0.0.0", port=800, reload=True) diff --git a/docker-compose.yml b/docker-compose.yml index 4761e583e0..ca8812de6e 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,3 +1,9 @@ +version: '3.8' + +x-common-config: &common-config + networks: + - agenta-network + services: reverse-proxy: image: traefik:v2.10 @@ -193,6 +199,41 @@ services: postgres: condition: service_healthy + completion-old-sdk: + extends: + file: ./services/completion-old-sdk/docker-compose.yml + service: completion-old-sdk + + chat-old-sdk: + extends: + file: ./services/chat-old-sdk/docker-compose.yml + service: chat-old-sdk + + completion-stateless-sdk: + extends: + file: ./services/completion-stateless-sdk/docker-compose.yml + service: completion-stateless-sdk + + completion-new-sdk: + extends: + file: ./services/completion-new-sdk/docker-compose.yml + service: completion-new-sdk + + chat-new-sdk: + extends: + file: ./services/chat-new-sdk/docker-compose.yml + service: chat-new-sdk + + completion-new-sdk-prompt: + extends: + file: ./services/completion-new-sdk-prompt/docker-compose.yml + service: completion-new-sdk-prompt + + chat-new-sdk-prompt: + extends: + file: ./services/chat-new-sdk-prompt/docker-compose.yml + service: chat-new-sdk-prompt + networks: agenta-network: name: agenta-network diff --git a/services/README.md b/services/README.md new file mode 100644 index 0000000000..5098702964 --- /dev/null +++ b/services/README.md @@ -0,0 +1,33 @@ +# Agenta Services + +This directory contains various versions of Agenta's LLM services, each offering distinct capabilities and interfaces for language model interactions. 
+ +## Service Overview + +### Legacy Services +- **completion-old-sdk**: Original completion service (as in current release) +- **chat-old-sdk**: Original chat service (as in current release) + +### New Services +All services with "new-sdk" utilize the modified SDK, which includes these changes: +- Configuration is now nested under `agenta_config` in the request body (no longer flattened) +- Implements the stateless SDK (no interface changes, but may introduce future issues in cloud deployment due to lack of testing) + +We've created two versions of each new service: +1. Original logic with new SDK: + - completion-new-sdk + - chat-new-sdk +2. New prompt object and updated logic: + - completion-new-sdk-prompt + - chat-new-sdk-prompt + +## Service Components + +Each service includes: +- Docker configuration (`docker-compose.yml`) +- REST API documentation (`.rest` files) +- Implementation code (`_app.py`) + +## Usage + +For usage examples and API details, refer to the `.rest` files in each service's directory. diff --git a/services/chat-new-sdk-prompt.rest b/services/chat-new-sdk-prompt.rest new file mode 100644 index 0000000000..462cfa83af --- /dev/null +++ b/services/chat-new-sdk-prompt.rest @@ -0,0 +1,33 @@ +### Test chat-new-sdk-prompt +POST http://localhost/chat-new-sdk-prompt/chat +Content-Type: application/json + +{ + "inputs": { + "message": "What is the capital of France?" + } +} + +### Test chat configuration with prompt +POST http://localhost/chat-new-sdk-prompt/configure +Content-Type: application/json + +{ + "model": "gpt-3.5-turbo", + "temperature": 0.7, + "max_tokens": 100, + "prompt": { + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant specializing in geography." 
+ } + ], + "template_format": "fstring", + "response_format": { + "type": "text" + }, + "tools": null, + "tool_choice": "auto" + } +} diff --git a/services/chat-new-sdk-prompt/Dockerfile b/services/chat-new-sdk-prompt/Dockerfile new file mode 100644 index 0000000000..eeda92c313 --- /dev/null +++ b/services/chat-new-sdk-prompt/Dockerfile @@ -0,0 +1,18 @@ +FROM python:3.10-slim + +ARG ROOT_PATH=/ +ENV ROOT_PATH=${ROOT_PATH} + +WORKDIR /app + +COPY . . + +RUN pip install --upgrade pip \ + && pip install --no-cache-dir agenta openai python-dotenv uvicorn "litellm>=1.0,<2.0" google-generativeai + +# Add agenta-cli to PYTHONPATH so it can find the local agenta package +ENV PYTHONPATH=/agenta-cli:$PYTHONPATH + +EXPOSE 80 + +CMD ["./entrypoint.sh"] diff --git a/services/chat-new-sdk-prompt/Dockerfile.prerelease b/services/chat-new-sdk-prompt/Dockerfile.prerelease new file mode 100644 index 0000000000..422c537eaf --- /dev/null +++ b/services/chat-new-sdk-prompt/Dockerfile.prerelease @@ -0,0 +1,16 @@ +FROM python:3.10-slim + +ARG ROOT_PATH=/ +ENV ROOT_PATH=${ROOT_PATH} + +WORKDIR /app + +COPY . . 
 + +RUN pip install --upgrade pip \ + && pip install --no-cache-dir openai python-dotenv uvicorn "litellm>=1.0,<2.0" google-generativeai \ + && pip install --no-cache-dir --pre agenta + +EXPOSE 80 + +CMD ["./entrypoint.sh"] diff --git a/services/chat-new-sdk-prompt/_app.py b/services/chat-new-sdk-prompt/_app.py new file mode 100644 index 0000000000..bc1cc08a5a --- /dev/null +++ b/services/chat-new-sdk-prompt/_app.py @@ -0,0 +1,52 @@ +from typing import Dict, List, Optional +from fastapi import HTTPException + +import agenta as ag +import litellm +from agenta.sdk.types import PromptTemplate, Message +from pydantic import BaseModel, Field + +litellm.drop_params = True +litellm.callbacks = [ag.callbacks.litellm_handler()] + +ag.init() + + +class MyConfig(BaseModel): + prompt: PromptTemplate = Field( + default=PromptTemplate( + system_prompt="You are an expert in geography", + user_prompt="What is the capital of {country}?", + ) + ) + + +@ag.route("/", config_schema=MyConfig) +@ag.instrument() +async def generate( + messages: List[Message], + inputs: Optional[Dict[str,str]]=None, +): + config = ag.ConfigManager.get_from_route(schema=MyConfig) + if config.prompt.input_keys is not None: + required_keys = set(config.prompt.input_keys) + # None-safe: treat a missing `inputs` payload as an empty mapping so a + # 422 is raised below instead of crashing with AttributeError on None. + provided_keys = set(inputs or {}) + + if required_keys != provided_keys: + raise HTTPException( + status_code=422, + detail=f"Invalid inputs. 
Expected: {sorted(required_keys)}, got: {sorted(provided_keys)}", + ) + + if inputs is not None: + formatted_prompt = config.prompt.format(**inputs) + else: + formatted_prompt = config.prompt + openai_kwargs = formatted_prompt.to_openai_kwargs() + + if messages is not None: + openai_kwargs["messages"].extend(messages) + + response = await litellm.acompletion(**openai_kwargs) + + return response.choices[0].message diff --git a/services/chat-new-sdk-prompt/docker-compose.yml b/services/chat-new-sdk-prompt/docker-compose.yml new file mode 100644 index 0000000000..a05ab0883b --- /dev/null +++ b/services/chat-new-sdk-prompt/docker-compose.yml @@ -0,0 +1,23 @@ +services: + chat-new-sdk-prompt: + build: . + volumes: + - .:/app + - ../../agenta-cli:/agenta-cli + environment: + - AGENTA_UNAUTHORIZED_EXECUTION_ALLOWED=True + - AGENTA_HOST=http://host.docker.internal + networks: + - agenta-network + labels: + - "traefik.http.routers.chat-new-sdk-prompt.rule=PathPrefix(`/chat-new-sdk-prompt/`)" + - "traefik.http.routers.chat-new-sdk-prompt.entrypoints=web" + - "traefik.http.middlewares.chat-new-sdk-prompt-strip.stripprefix.prefixes=/chat-new-sdk-prompt" + - "traefik.http.middlewares.chat-new-sdk-prompt-strip.stripprefix.forceslash=true" + - "traefik.http.routers.chat-new-sdk-prompt.middlewares=chat-new-sdk-prompt-strip" + - "traefik.http.services.chat-new-sdk-prompt.loadbalancer.server.port=80" + - "traefik.http.routers.chat-new-sdk-prompt.service=chat-new-sdk-prompt" + +networks: + agenta-network: + external: true diff --git a/services/chat-new-sdk-prompt/entrypoint.sh b/services/chat-new-sdk-prompt/entrypoint.sh new file mode 100755 index 0000000000..e9b7b1d586 --- /dev/null +++ b/services/chat-new-sdk-prompt/entrypoint.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env +fi + +# Run uvicorn with reload watching both app and agenta-cli directories + +exec python main.py diff --git a/services/chat-new-sdk-prompt/main.py b/services/chat-new-sdk-prompt/main.py 
new file mode 100644 index 0000000000..c17d626bc3 --- /dev/null +++ b/services/chat-new-sdk-prompt/main.py @@ -0,0 +1,14 @@ +from uvicorn import run +import agenta +import _app # This will register the routes with the FastAPI application +import os + + +if __name__ == "__main__": + run( + "agenta:app", + host="0.0.0.0", + port=80, + reload=True, + reload_dirs=[".", "/agenta-cli"], + ) diff --git a/services/chat-new-sdk-prompt/mock_litellm.py b/services/chat-new-sdk-prompt/mock_litellm.py new file mode 100644 index 0000000000..a5b57a68cc --- /dev/null +++ b/services/chat-new-sdk-prompt/mock_litellm.py @@ -0,0 +1,53 @@ +from typing import Dict, Any, List +from dataclasses import dataclass + + +@dataclass +class MockUsage: + prompt_tokens: int = 10 + completion_tokens: int = 20 + total_tokens: int = 30 + + def dict(self): + return { + "prompt_tokens": self.prompt_tokens, + "completion_tokens": self.completion_tokens, + "total_tokens": self.total_tokens, + } + + +@dataclass +class MockMessage: + content: str = "This is a mock response from the LLM." 
+ + +@dataclass +class MockChoice: + message: MockMessage = MockMessage() + + +@dataclass +class MockCompletion: + choices: List[MockChoice] = None + usage: MockUsage = None + + def __init__(self): + self.choices = [MockChoice()] + self.usage = MockUsage() + + +class MockLiteLLM: + async def acompletion( + self, + model: str, + messages: List[Dict[str, Any]], + temperature: float, + max_tokens: int = None, + **kwargs + ) -> MockCompletion: + return MockCompletion() + + class cost_calculator: + @staticmethod + def completion_cost(completion_response, model): + return 0.0001 # Mock cost diff --git a/services/chat-new-sdk-prompt/supported_llm_models.py b/services/chat-new-sdk-prompt/supported_llm_models.py new file mode 100644 index 0000000000..c314be0e37 --- /dev/null +++ b/services/chat-new-sdk-prompt/supported_llm_models.py @@ -0,0 +1,91 @@ +supported_llm_models = { + "Mistral AI": [ + "mistral/mistral-tiny", + "mistral/mistral-small", + "mistral/mistral-medium", + "mistral/mistral-large-latest", + ], + "Open AI": [ + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo", + "gpt-4", + "gpt-4o", + "gpt-4-1106-preview", + ], + "Gemini": [ + "gemini/gemini-1.5-pro-latest", + ], + "Cohere": [ + "cohere/command-light", + "cohere/command-r-plus", + "cohere/command-nightly", + ], + "Anthropic": [ + "anthropic/claude-2.1", + "anthropic/claude-2", + "anthropic/claude-instant-1.2", + "anthropic/claude-instant-1", + ], + "Anyscale": [ + "anyscale/meta-llama/Llama-2-13b-chat-hf", + "anyscale/meta-llama/Llama-2-70b-chat-hf", + ], + "Perplexity AI": [ + "perplexity/pplx-7b-chat", + "perplexity/pplx-70b-chat", + "perplexity/pplx-7b-online", + "perplexity/pplx-70b-online", + ], + "DeepInfra": [ + "deepinfra/meta-llama/Llama-2-70b-chat-hf", + "deepinfra/meta-llama/Llama-2-13b-chat-hf", + "deepinfra/codellama/CodeLlama-34b-Instruct-hf", + "deepinfra/mistralai/Mistral-7B-Instruct-v0.1", + "deepinfra/jondurbin/airoboros-l2-70b-gpt4-1.4.1", + ], + "Together AI": [ + 
"together_ai/togethercomputer/llama-2-70b-chat", + "together_ai/togethercomputer/llama-2-70b", + "together_ai/togethercomputer/LLaMA-2-7B-32K", + "together_ai/togethercomputer/Llama-2-7B-32K-Instruct", + "together_ai/togethercomputer/llama-2-7b", + "together_ai/togethercomputer/alpaca-7b", + "together_ai/togethercomputer/CodeLlama-34b-Instruct", + "together_ai/togethercomputer/CodeLlama-34b-Python", + "together_ai/WizardLM/WizardCoder-Python-34B-V1.0", + "together_ai/NousResearch/Nous-Hermes-Llama2-13b", + "together_ai/Austism/chronos-hermes-13b", + ], + "Aleph Alpha": [ + "luminous-base", + "luminous-base-control", + "luminous-extended-control", + "luminous-supreme", + ], + "OpenRouter": [ + "openrouter/openai/gpt-3.5-turbo", + "openrouter/openai/gpt-3.5-turbo-16k", + "openrouter/anthropic/claude-instant-v1", + "openrouter/google/palm-2-chat-bison", + "openrouter/google/palm-2-codechat-bison", + "openrouter/meta-llama/llama-2-13b-chat", + "openrouter/meta-llama/llama-2-70b-chat", + ], + "Groq": [ + "groq/llama3-8b-8192", + "groq/llama3-70b-8192", + "groq/llama2-70b-4096", + "groq/mixtral-8x7b-32768", + "groq/gemma-7b-it", + ], +} + + +def get_all_supported_llm_models(): + """ + Returns a list of evaluators + + Returns: + List[dict]: A list of evaluator dictionaries. + """ + return supported_llm_models diff --git a/services/chat-new-sdk.rest b/services/chat-new-sdk.rest new file mode 100644 index 0000000000..7b21dc0833 --- /dev/null +++ b/services/chat-new-sdk.rest @@ -0,0 +1,102 @@ +@baseUrl = http://localhost + +@service = chat-new-sdk + +### These request can be run using the Rest Client extension in vsCode + + +### Health Check +GET {{baseUrl}}/{{service}}/health HTTP/1.1 + +### Test chat-new-sdk +POST {{baseUrl}}/{{service}}/chat HTTP/1.1 +Content-Type: application/json + +{ + "inputs": { + "message": "What is the capital of France?" 
+ } +} + +### Test chat configuration +POST {{baseUrl}}/{{service}}/configure HTTP/1.1 +Content-Type: application/json + +{ + "model": "gpt-3.5-turbo", + "temperature": 0.7, + "max_tokens": 100, + "prompt": { + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant specializing in geography." + } + ], + "template_format": "fstring" + } +} + +### Generate Chat Response +POST {{baseUrl}}/{{service}}/generate HTTP/1.1 +Content-Type: application/json + +{ + "temperature": 0.2, + "model": "gpt-3.5-turbo", + "max_tokens": -1, + "prompt_system": "You have expertise in offering technical ideas to startups.", + "inputs": [ + { + "role": "user", + "content": "What are some innovative tech solutions for a startup?" + } + ] +} + +### Playground Run +POST {{baseUrl}}/{{service}}/playground/run HTTP/1.1 +Content-Type: application/json + +{ + "temperature": 0.2, + "model": "gpt-3.5-turbo", + "max_tokens": -1, + "prompt_system": "You have expertise in offering technical ideas to startups.", + "inputs": [ + { + "role": "user", + "content": "Suggest a tech stack for a new SaaS product" + } + ] +} + +### Generate Deployed +POST {{baseUrl}}/{{service}}/generate_deployed HTTP/1.1 +Content-Type: application/json + +{ + "inputs": [ + { + "role": "user", + "content": "Hello" + } + ], + "config": "default", + "environment": "production" +} + +### Run +POST {{baseUrl}}/{{service}}/run HTTP/1.1 +Content-Type: application/json + +{ + "inputs": [ + { + "role": "user", + "content": "Hello" + } + ], + "config": "default", + "environment": "production" +} diff --git a/services/chat-new-sdk/Dockerfile b/services/chat-new-sdk/Dockerfile new file mode 100644 index 0000000000..eeda92c313 --- /dev/null +++ b/services/chat-new-sdk/Dockerfile @@ -0,0 +1,18 @@ +FROM python:3.10-slim + +ARG ROOT_PATH=/ +ENV ROOT_PATH=${ROOT_PATH} + +WORKDIR /app + +COPY . . 
+ +RUN pip install --upgrade pip \ + && pip install --no-cache-dir agenta openai python-dotenv uvicorn "litellm>=1.0,<2.0" google-generativeai + +# Add agenta-cli to PYTHONPATH so it can find the local agenta package +ENV PYTHONPATH=/agenta-cli:$PYTHONPATH + +EXPOSE 80 + +CMD ["./entrypoint.sh"] diff --git a/services/chat-new-sdk/Dockerfile.prerelease b/services/chat-new-sdk/Dockerfile.prerelease new file mode 100644 index 0000000000..422c537eaf --- /dev/null +++ b/services/chat-new-sdk/Dockerfile.prerelease @@ -0,0 +1,16 @@ +FROM python:3.10-slim + +ARG ROOT_PATH=/ +ENV ROOT_PATH=${ROOT_PATH} + +WORKDIR /app + +COPY . . + +RUN pip install --upgrade pip \ + && pip install --no-cache-dir openai python-dotenv uvicorn "litellm>=1.0,<2.0" google-generativeai \ + && pip install --no-cache-dir --pre agenta + +EXPOSE 80 + +CMD ["./entrypoint.sh"] diff --git a/services/chat-new-sdk/_app.py b/services/chat-new-sdk/_app.py new file mode 100644 index 0000000000..935118c1f5 --- /dev/null +++ b/services/chat-new-sdk/_app.py @@ -0,0 +1,65 @@ +from typing import Annotated, Any, Dict, List + +import agenta as ag +from agenta.sdk.assets import supported_llm_models +from pydantic import BaseModel, Field +import os +# Import mock if MOCK_LLM environment variable is set +if os.getenv("MOCK_LLM", True): + from mock_litellm import MockLiteLLM + + litellm = MockLiteLLM() +else: + import litellm + + litellm.drop_params = True + litellm.callbacks = [ag.callbacks.litellm_handler()] + +SYSTEM_PROMPT = "You have expertise in offering technical ideas to startups." 
+ +ag.init() + + +class MyConfig(BaseModel): + temperature: float = Field(default=0.2, le=1, ge=0) + model: Annotated[str, ag.MultipleChoice(choices=supported_llm_models)] = Field( + default="gpt-3.5-turbo" + ) + max_tokens: int = Field(default=-1, ge=-1, le=4000) + prompt_system: str = Field(default=SYSTEM_PROMPT) + + +@ag.instrument(spankind="llm") +async def llm_call(messages: List[Dict[str, Any]], maxtokens): + config = ag.ConfigManager.get_from_route(schema=MyConfig) + chat_completion = await litellm.acompletion( + model=config.model, + messages=messages, + temperature=config.temperature, + max_tokens=maxtokens, + ) + token_usage = chat_completion.usage.dict() + return { + "usage": token_usage, + "message": chat_completion.choices[0].message.content, + "cost": litellm.cost_calculator.completion_cost( + completion_response=chat_completion, model=config.model + ), + } + + +@ag.route("/", config_schema=MyConfig) +@ag.instrument() +async def chat(inputs: ag.MessagesInput = ag.MessagesInput()) -> Dict[str, Any]: + config = ag.ConfigManager.get_from_route(schema=MyConfig) + messages = [{"role": "system", "content": config.prompt_system}] + inputs + max_tokens = config.max_tokens if config.max_tokens != -1 else None + response = await llm_call( + messages=messages, + maxtokens=max_tokens, + ) + return { + "message": response["message"], + "usage": response.get("usage", None), + "cost": response.get("cost", None), + } diff --git a/services/chat-new-sdk/docker-compose.yml b/services/chat-new-sdk/docker-compose.yml new file mode 100644 index 0000000000..8b61dc78e2 --- /dev/null +++ b/services/chat-new-sdk/docker-compose.yml @@ -0,0 +1,23 @@ +services: + chat-new-sdk: + build: . 
+ volumes: + - .:/app + - ../../agenta-cli:/agenta-cli + environment: + - AGENTA_UNAUTHORIZED_EXECUTION_ALLOWED=True + - AGENTA_HOST=http://host.docker.internal + networks: + - agenta-network + labels: + - "traefik.http.routers.chat-new-sdk.rule=PathPrefix(`/chat-new-sdk/`)" + - "traefik.http.routers.chat-new-sdk.entrypoints=web" + - "traefik.http.middlewares.chat-new-sdk-strip.stripprefix.prefixes=/chat-new-sdk" + - "traefik.http.middlewares.chat-new-sdk-strip.stripprefix.forceslash=true" + - "traefik.http.routers.chat-new-sdk.middlewares=chat-new-sdk-strip" + - "traefik.http.services.chat-new-sdk.loadbalancer.server.port=80" + - "traefik.http.routers.chat-new-sdk.service=chat-new-sdk" + +networks: + agenta-network: + external: true diff --git a/services/chat-new-sdk/entrypoint.sh b/services/chat-new-sdk/entrypoint.sh new file mode 100755 index 0000000000..e9b7b1d586 --- /dev/null +++ b/services/chat-new-sdk/entrypoint.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env +fi + +# Run uvicorn with reload watching both app and agenta-cli directories + +exec python main.py diff --git a/services/chat-new-sdk/main.py b/services/chat-new-sdk/main.py new file mode 100644 index 0000000000..c17d626bc3 --- /dev/null +++ b/services/chat-new-sdk/main.py @@ -0,0 +1,14 @@ +from uvicorn import run +import agenta +import _app # This will register the routes with the FastAPI application +import os + + +if __name__ == "__main__": + run( + "agenta:app", + host="0.0.0.0", + port=80, + reload=True, + reload_dirs=[".", "/agenta-cli"], + ) diff --git a/services/chat-new-sdk/mock_litellm.py b/services/chat-new-sdk/mock_litellm.py new file mode 100644 index 0000000000..a5b57a68cc --- /dev/null +++ b/services/chat-new-sdk/mock_litellm.py @@ -0,0 +1,53 @@ +from typing import Dict, Any, List +from dataclasses import dataclass + + +@dataclass +class MockUsage: + prompt_tokens: int = 10 + completion_tokens: int = 20 + total_tokens: int = 30 + + def dict(self): + return { + 
"prompt_tokens": self.prompt_tokens, + "completion_tokens": self.completion_tokens, + "total_tokens": self.total_tokens, + } + + +@dataclass +class MockMessage: + content: str = "This is a mock response from the LLM." + + +@dataclass +class MockChoice: + message: MockMessage = MockMessage() + + +@dataclass +class MockCompletion: + choices: List[MockChoice] = None + usage: MockUsage = None + + def __init__(self): + self.choices = [MockChoice()] + self.usage = MockUsage() + + +class MockLiteLLM: + async def acompletion( + self, + model: str, + messages: List[Dict[str, Any]], + temperature: float, + max_tokens: int = None, + **kwargs + ) -> MockCompletion: + return MockCompletion() + + class cost_calculator: + @staticmethod + def completion_cost(completion_response, model): + return 0.0001 # Mock cost diff --git a/services/chat-new-sdk/supported_llm_models.py b/services/chat-new-sdk/supported_llm_models.py new file mode 100644 index 0000000000..c314be0e37 --- /dev/null +++ b/services/chat-new-sdk/supported_llm_models.py @@ -0,0 +1,91 @@ +supported_llm_models = { + "Mistral AI": [ + "mistral/mistral-tiny", + "mistral/mistral-small", + "mistral/mistral-medium", + "mistral/mistral-large-latest", + ], + "Open AI": [ + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo", + "gpt-4", + "gpt-4o", + "gpt-4-1106-preview", + ], + "Gemini": [ + "gemini/gemini-1.5-pro-latest", + ], + "Cohere": [ + "cohere/command-light", + "cohere/command-r-plus", + "cohere/command-nightly", + ], + "Anthropic": [ + "anthropic/claude-2.1", + "anthropic/claude-2", + "anthropic/claude-instant-1.2", + "anthropic/claude-instant-1", + ], + "Anyscale": [ + "anyscale/meta-llama/Llama-2-13b-chat-hf", + "anyscale/meta-llama/Llama-2-70b-chat-hf", + ], + "Perplexity AI": [ + "perplexity/pplx-7b-chat", + "perplexity/pplx-70b-chat", + "perplexity/pplx-7b-online", + "perplexity/pplx-70b-online", + ], + "DeepInfra": [ + "deepinfra/meta-llama/Llama-2-70b-chat-hf", + "deepinfra/meta-llama/Llama-2-13b-chat-hf", + 
Returns the supported LLM models grouped by provider. + + Returns: + Dict[str, List[str]]: Mapping of provider name to its model identifiers.
### These requests can be run using the REST Client extension in VS Code
+ +RUN pip install --upgrade pip \ + && pip install --no-cache-dir agenta openai python-dotenv uvicorn "litellm>=1.0,<2.0" google-generativeai + + +EXPOSE 80 + +CMD ["./entrypoint.sh"] diff --git a/services/chat-old-sdk/Dockerfile.prerelease b/services/chat-old-sdk/Dockerfile.prerelease new file mode 100644 index 0000000000..e69e0248ef --- /dev/null +++ b/services/chat-old-sdk/Dockerfile.prerelease @@ -0,0 +1,17 @@ +FROM python:3.10-slim + +ARG ROOT_PATH=/ +ENV ROOT_PATH=${ROOT_PATH} + +WORKDIR /app + +COPY . . + +RUN pip install --upgrade pip \ + && pip install --no-cache-dir openai python-dotenv uvicorn "litellm>=1.0,<2.0" google-generativeai \ + && pip install --no-cache-dir --pre agenta + + +EXPOSE 80 + +CMD ["./entrypoint.sh"] diff --git a/services/chat-old-sdk/_app.py b/services/chat-old-sdk/_app.py new file mode 100644 index 0000000000..0a0ff4f0b1 --- /dev/null +++ b/services/chat-old-sdk/_app.py @@ -0,0 +1,60 @@ +from typing import Dict, Any, List +import agenta as ag +import os +from supported_llm_models import get_all_supported_llm_models + +# Import mock if MOCK_LLM environment variable is set +if os.getenv("MOCK_LLM", True): + from mock_litellm import MockLiteLLM + + litellm = MockLiteLLM() +else: + import litellm + + litellm.drop_params = True + litellm.callbacks = [ag.callbacks.litellm_handler()] + +SYSTEM_PROMPT = "You have expertise in offering technical ideas to startups." 
+ +ag.init() +ag.config.default( + temperature=ag.FloatParam(0.2), + model=ag.GroupedMultipleChoiceParam( + default="gpt-3.5-turbo", choices=get_all_supported_llm_models() + ), + max_tokens=ag.IntParam(-1, -1, 4000), + prompt_system=ag.TextParam(SYSTEM_PROMPT), +) + + +async def llm_call(messages: List[Dict[str, Any]], max_tokens: int): + chat_completion = await litellm.acompletion( + model=ag.config.model, + messages=messages, + temperature=ag.config.temperature, + max_tokens=max_tokens, + ) + token_usage = chat_completion.usage.dict() + return { + "usage": token_usage, + "message": chat_completion.choices[0].message.content, + "cost": litellm.cost_calculator.completion_cost( + completion_response=chat_completion, model=ag.config.model + ), + } + + +@ag.entrypoint +@ag.instrument() +async def chat(inputs: ag.MessagesInput = ag.MessagesInput()) -> Dict[str, Any]: + messages = [{"role": "system", "content": ag.config.prompt_system}] + inputs + max_tokens = ag.config.max_tokens if ag.config.max_tokens != -1 else None + response = await llm_call( + messages=messages, + max_tokens=max_tokens, + ) + return { + "message": response["message"], + "usage": response.get("usage", None), + "cost": response.get("cost", None), + } diff --git a/services/chat-old-sdk/docker-compose.yml b/services/chat-old-sdk/docker-compose.yml new file mode 100644 index 0000000000..5b1f13067c --- /dev/null +++ b/services/chat-old-sdk/docker-compose.yml @@ -0,0 +1,18 @@ +services: + chat-old-sdk: + build: . 
+ volumes: + - .:/app + environment: + - AGENTA_UNAUTHORIZED_EXECUTION_ALLOWED=True + - AGENTA_HOST=http://host.docker.internal + networks: + - agenta-network + labels: + - "traefik.http.routers.chat-old-sdk.rule=PathPrefix(`/chat-old-sdk/`)" + - "traefik.http.routers.chat-old-sdk.entrypoints=web" + - "traefik.http.middlewares.chat-old-sdk-strip.stripprefix.prefixes=/chat-old-sdk" + - "traefik.http.middlewares.chat-old-sdk-strip.stripprefix.forceslash=true" + - "traefik.http.routers.chat-old-sdk.middlewares=chat-old-sdk-strip" + - "traefik.http.services.chat-old-sdk.loadbalancer.server.port=80" + - "traefik.http.routers.chat-old-sdk.service=chat-old-sdk" diff --git a/services/chat-old-sdk/entrypoint.sh b/services/chat-old-sdk/entrypoint.sh new file mode 100755 index 0000000000..3c6b353144 --- /dev/null +++ b/services/chat-old-sdk/entrypoint.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env +fi + +exec python main.py diff --git a/services/chat-old-sdk/main.py b/services/chat-old-sdk/main.py new file mode 100644 index 0000000000..aa26ddc352 --- /dev/null +++ b/services/chat-old-sdk/main.py @@ -0,0 +1,9 @@ +import re +from uvicorn import run +import agenta +import _app # This will register the routes with the FastAPI application +import os + + +if __name__ == "__main__": + run("agenta:app", host="0.0.0.0", port=80, reload=True) diff --git a/services/chat-old-sdk/mock_litellm.py b/services/chat-old-sdk/mock_litellm.py new file mode 100644 index 0000000000..a5b57a68cc --- /dev/null +++ b/services/chat-old-sdk/mock_litellm.py @@ -0,0 +1,53 @@ +from typing import Dict, Any, List +from dataclasses import dataclass + + +@dataclass +class MockUsage: + prompt_tokens: int = 10 + completion_tokens: int = 20 + total_tokens: int = 30 + + def dict(self): + return { + "prompt_tokens": self.prompt_tokens, + "completion_tokens": self.completion_tokens, + "total_tokens": self.total_tokens, + } + + +@dataclass +class MockMessage: + content: str = "This is a mock 
response from the LLM." + + +@dataclass +class MockChoice: + message: MockMessage = MockMessage() + + +@dataclass +class MockCompletion: + choices: List[MockChoice] = None + usage: MockUsage = None + + def __init__(self): + self.choices = [MockChoice()] + self.usage = MockUsage() + + +class MockLiteLLM: + async def acompletion( + self, + model: str, + messages: List[Dict[str, Any]], + temperature: float, + max_tokens: int = None, + **kwargs + ) -> MockCompletion: + return MockCompletion() + + class cost_calculator: + @staticmethod + def completion_cost(completion_response, model): + return 0.0001 # Mock cost diff --git a/services/chat-old-sdk/supported_llm_models.py b/services/chat-old-sdk/supported_llm_models.py new file mode 100644 index 0000000000..c314be0e37 --- /dev/null +++ b/services/chat-old-sdk/supported_llm_models.py @@ -0,0 +1,91 @@ +supported_llm_models = { + "Mistral AI": [ + "mistral/mistral-tiny", + "mistral/mistral-small", + "mistral/mistral-medium", + "mistral/mistral-large-latest", + ], + "Open AI": [ + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo", + "gpt-4", + "gpt-4o", + "gpt-4-1106-preview", + ], + "Gemini": [ + "gemini/gemini-1.5-pro-latest", + ], + "Cohere": [ + "cohere/command-light", + "cohere/command-r-plus", + "cohere/command-nightly", + ], + "Anthropic": [ + "anthropic/claude-2.1", + "anthropic/claude-2", + "anthropic/claude-instant-1.2", + "anthropic/claude-instant-1", + ], + "Anyscale": [ + "anyscale/meta-llama/Llama-2-13b-chat-hf", + "anyscale/meta-llama/Llama-2-70b-chat-hf", + ], + "Perplexity AI": [ + "perplexity/pplx-7b-chat", + "perplexity/pplx-70b-chat", + "perplexity/pplx-7b-online", + "perplexity/pplx-70b-online", + ], + "DeepInfra": [ + "deepinfra/meta-llama/Llama-2-70b-chat-hf", + "deepinfra/meta-llama/Llama-2-13b-chat-hf", + "deepinfra/codellama/CodeLlama-34b-Instruct-hf", + "deepinfra/mistralai/Mistral-7B-Instruct-v0.1", + "deepinfra/jondurbin/airoboros-l2-70b-gpt4-1.4.1", + ], + "Together AI": [ + 
Returns the supported LLM models grouped by provider. + + Returns: + Dict[str, List[str]]: Mapping of provider name to its model identifiers.
### These requests can be run using the REST Client extension in VS Code
Provide movie information in JSON format.", + "role": "system" + }, + { + "content": "Give me information about the movie {title}", + "role": "user" + } + ], + "template_format": "fstring" + } + }, + "inputs": { + "title": "The Matrix" + } +} + +### JSON Schema Response - Recipe Generator +POST {{baseUrl}}/generate HTTP/1.1 +Content-Type: application/json + +{ + "agenta_config": { + "prompt": { + "llm_config": { + "model": "gpt-4", + "response_format": { + "type": "json_schema", + "json_schema": { + "name": "recipe", + "description": "A recipe with ingredients and instructions", + "schema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name of the recipe" + }, + "ingredients": { + "type": "array", + "items": { + "type": "object", + "properties": { + "item": { + "type": "string", + "description": "Name of the ingredient" + }, + "amount": { + "type": "string", + "description": "Amount of the ingredient needed" + } + }, + "required": ["item", "amount"] + }, + "description": "List of ingredients needed" + }, + "instructions": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step cooking instructions" + }, + "prep_time": { + "type": "string", + "description": "Preparation time" + }, + "cook_time": { + "type": "string", + "description": "Cooking time" + } + }, + "required": ["name", "ingredients", "instructions", "prep_time", "cook_time"] + } + } + } + }, + "messages": [ + { + "content": "You are a professional chef. 
Generate detailed recipes in a structured format.", + "role": "system" + }, + { + "content": "Give me a recipe for {dish}", + "role": "user" + } + ], + "template_format": "fstring" + } + }, + "inputs": { + "dish": "chocolate chip cookies" + } +} + +### Function Calling with Tools - Weather Assistant +POST {{baseUrl}}/generate HTTP/1.1 +Content-Type: application/json + +{ + "agenta_config": { + "prompt": { + "llm_config": { + "model": "gpt-4", + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"], + "description": "The unit of temperature to use" + } + }, + "required": ["location", "unit"] + } + } + } + ], + "tool_choice": "auto" + }, + "messages": [ + { + "content": "You are a weather assistant. Use the provided function to get weather information.", + "role": "system" + }, + { + "content": "What's the current weather in {city}? 
Please provide the temperature in {unit}.", + "role": "user" + } + ], + "template_format": "fstring" + } + }, + "inputs": { + "city": "San Francisco, CA", + "unit": "celsius" + } +} + +### Function Calling with Multiple Tools - Smart Home Assistant +POST {{baseUrl}}/generate HTTP/1.1 +Content-Type: application/json + +{ + "agenta_config": { + "prompt": { + "llm_config": { + "model": "gpt-4", + "tools": [ + { + "type": "function", + "function": { + "name": "get_device_status", + "description": "Get the current status of a smart home device", + "parameters": { + "type": "object", + "properties": { + "device_id": { + "type": "string", + "description": "The ID of the device" + }, + "device_type": { + "type": "string", + "enum": ["light", "thermostat", "camera"], + "description": "The type of device" + } + }, + "required": ["device_id", "device_type"] + } + } + }, + { + "type": "function", + "function": { + "name": "control_device", + "description": "Control a smart home device", + "parameters": { + "type": "object", + "properties": { + "device_id": { + "type": "string", + "description": "The ID of the device" + }, + "action": { + "type": "string", + "enum": ["turn_on", "turn_off", "set_temperature", "set_brightness"], + "description": "The action to perform" + }, + "value": { + "type": "number", + "description": "The value for the action (e.g., temperature or brightness level)" + } + }, + "required": ["device_id", "action"] + } + } + } + ], + "tool_choice": "auto" + }, + "messages": [ + { + "content": "You are a smart home assistant. Use the provided functions to check and control devices.", + "role": "system" + }, + { + "content": "{command}", + "role": "user" + } + ], + "template_format": "fstring" + } + }, + "inputs": { + "command": "Check the status of the living room thermostat (device_id: therm_01) and if it's below 20°C, set it to 22°C." 
### These requests can be run using the REST Client extension in VS Code
Provide movie information in JSON format.", + "role": "system" + }, + { + "content": "Give me information about the movie {title}", + "role": "user" + } + ], + "template_format": "fstring" + } + }, + "inputs": { + "title": "The Matrix" + } +} + +### JSON Schema Response - Recipe Generator +POST {{baseUrl}}/{{service}}/generate HTTP/1.1 +Content-Type: application/json + +{ + "agenta_config": { + "prompt": { + "llm_config": { + "model": "gpt-4", + "response_format": { + "type": "json_schema", + "json_schema": { + "name": "recipe", + "description": "A recipe with ingredients and instructions", + "schema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name of the recipe" + }, + "ingredients": { + "type": "array", + "items": { + "type": "object", + "properties": { + "item": { + "type": "string", + "description": "Name of the ingredient" + }, + "amount": { + "type": "string", + "description": "Amount of the ingredient needed" + } + }, + "required": ["item", "amount"] + }, + "description": "List of ingredients needed" + }, + "instructions": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step cooking instructions" + }, + "prep_time": { + "type": "string", + "description": "Preparation time" + }, + "cook_time": { + "type": "string", + "description": "Cooking time" + } + }, + "required": ["name", "ingredients", "instructions", "prep_time", "cook_time"] + } + } + } + }, + "messages": [ + { + "content": "You are a professional chef. 
Generate detailed recipes in a structured format.", + "role": "system" + }, + { + "content": "Give me a recipe for {dish}", + "role": "user" + } + ], + "template_format": "fstring" + } + }, + "inputs": { + "dish": "chocolate chip cookies" + } +} + +### Function Calling with Tools - Weather Assistant +POST {{baseUrl}}/{{service}}/generate HTTP/1.1 +Content-Type: application/json + +{ + "agenta_config": { + "prompt": { + "llm_config": { + "model": "gpt-4", + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"], + "description": "The unit of temperature to use" + } + }, + "required": ["location", "unit"] + } + } + } + ], + "tool_choice": "auto" + }, + "messages": [ + { + "content": "You are a weather assistant. Use the provided function to get weather information.", + "role": "system" + }, + { + "content": "What's the current weather in {city}? 
Please provide the temperature in {unit}.", + "role": "user" + } + ], + "template_format": "fstring" + } + }, + "inputs": { + "city": "San Francisco, CA", + "unit": "celsius" + } +} + +### Function Calling with Multiple Tools - Smart Home Assistant +POST {{baseUrl}}/{{service}}/generate HTTP/1.1 +Content-Type: application/json + +{ + "agenta_config": { + "prompt": { + "llm_config": { + "model": "gpt-4", + "tools": [ + { + "type": "function", + "function": { + "name": "get_device_status", + "description": "Get the current status of a smart home device", + "parameters": { + "type": "object", + "properties": { + "device_id": { + "type": "string", + "description": "The ID of the device" + }, + "device_type": { + "type": "string", + "enum": ["light", "thermostat", "camera"], + "description": "The type of device" + } + }, + "required": ["device_id", "device_type"] + } + } + }, + { + "type": "function", + "function": { + "name": "control_device", + "description": "Control a smart home device", + "parameters": { + "type": "object", + "properties": { + "device_id": { + "type": "string", + "description": "The ID of the device" + }, + "action": { + "type": "string", + "enum": ["turn_on", "turn_off", "set_temperature", "set_brightness"], + "description": "The action to perform" + }, + "value": { + "type": "number", + "description": "The value for the action (e.g., temperature or brightness level)" + } + }, + "required": ["device_id", "action"] + } + } + } + ], + "tool_choice": "auto" + }, + "messages": [ + { + "content": "You are a smart home assistant. Use the provided functions to check and control devices.", + "role": "system" + }, + { + "content": "{command}", + "role": "user" + } + ], + "template_format": "fstring" + } + }, + "inputs": { + "command": "Check the status of the living room thermostat (device_id: therm_01) and if it's below 20°C, set it to 22°C." 
+ } +} diff --git a/services/completion-new-sdk-prompt/Dockerfile b/services/completion-new-sdk-prompt/Dockerfile new file mode 100644 index 0000000000..eeda92c313 --- /dev/null +++ b/services/completion-new-sdk-prompt/Dockerfile @@ -0,0 +1,18 @@ +FROM python:3.10-slim + +ARG ROOT_PATH=/ +ENV ROOT_PATH=${ROOT_PATH} + +WORKDIR /app + +COPY . . + +RUN pip install --upgrade pip \ + && pip install --no-cache-dir agenta openai python-dotenv uvicorn "litellm>=1.0,<2.0" google-generativeai + +# Add agenta-cli to PYTHONPATH so it can find the local agenta package +ENV PYTHONPATH=/agenta-cli:$PYTHONPATH + +EXPOSE 80 + +CMD ["./entrypoint.sh"] diff --git a/services/completion-new-sdk-prompt/Dockerfile.prerelease b/services/completion-new-sdk-prompt/Dockerfile.prerelease new file mode 100644 index 0000000000..422c537eaf --- /dev/null +++ b/services/completion-new-sdk-prompt/Dockerfile.prerelease @@ -0,0 +1,16 @@ +FROM python:3.10-slim + +ARG ROOT_PATH=/ +ENV ROOT_PATH=${ROOT_PATH} + +WORKDIR /app + +COPY . . 
+ +RUN pip install --upgrade pip \ + && pip install --no-cache-dir openai python-dotenv uvicorn "litellm>=1.0,<2.0" google-generativeai \ + && pip install --no-cache-dir --pre agenta + +EXPOSE 80 + +CMD ["./entrypoint.sh"] diff --git a/services/completion-new-sdk-prompt/__init__.py b/services/completion-new-sdk-prompt/__init__.py new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/services/completion-new-sdk-prompt/__init__.py @@ -0,0 +1 @@ + diff --git a/services/completion-new-sdk-prompt/_app.py b/services/completion-new-sdk-prompt/_app.py new file mode 100644 index 0000000000..9d9ed65b3e --- /dev/null +++ b/services/completion-new-sdk-prompt/_app.py @@ -0,0 +1,51 @@ +from typing import Dict +from fastapi import HTTPException + +import agenta as ag +import litellm +from agenta.sdk.types import PromptTemplate +from pydantic import BaseModel, Field + +litellm.drop_params = True +litellm.callbacks = [ag.callbacks.litellm_handler()] + +ag.init() + + +class MyConfig(BaseModel): + prompt: PromptTemplate = Field( + default=PromptTemplate( + system_prompt="You are an expert in geography", + user_prompt="What is the capital of {country}?", + ) + ) + + +@ag.route("/", config_schema=MyConfig) +@ag.instrument() +async def generate( + inputs: Dict[str, str], +): + config = ag.ConfigManager.get_from_route(schema=MyConfig) + if config.prompt.input_keys is not None: + required_keys = set(config.prompt.input_keys) + provided_keys = set(inputs.keys()) + + if required_keys != provided_keys: + raise HTTPException( + status_code=422, + detail=f"Invalid inputs. 
Expected: {sorted(required_keys)}, got: {sorted(provided_keys)}", + ) + response = await litellm.acompletion( + **config.prompt.format(**inputs).to_openai_kwargs() + ) + message = response.choices[0].message + + if message.content is not None: + return message.content + if hasattr(message, "refusal") and message.refusal is not None: + return message.refusal + if hasattr(message, "parsed") and message.parsed is not None: + return message.parsed + if hasattr(message, "tool_calls") and message.tool_calls is not None: + return [tool_call.dict() for tool_call in message.tool_calls] diff --git a/services/completion-new-sdk-prompt/docker-compose.yml b/services/completion-new-sdk-prompt/docker-compose.yml new file mode 100644 index 0000000000..623442d7aa --- /dev/null +++ b/services/completion-new-sdk-prompt/docker-compose.yml @@ -0,0 +1,25 @@ +services: + completion-new-sdk-prompt: + build: . + volumes: + - .:/app + - ../../agenta-cli:/agenta-cli + environment: + - AGENTA_UNAUTHORIZED_EXECUTION_ALLOWED=True + - AGENTA_HOST=http://host.docker.internal + - OPENAI_API_KEY=sk-xxxx + + networks: + - agenta-network + labels: + - "traefik.http.routers.completion-new-sdk-prompt.rule=PathPrefix(`/completion-new-sdk-prompt/`)" + - "traefik.http.routers.completion-new-sdk-prompt.entrypoints=web" + - "traefik.http.middlewares.completion-new-sdk-prompt-strip.stripprefix.prefixes=/completion-new-sdk-prompt" + - "traefik.http.middlewares.completion-new-sdk-prompt-strip.stripprefix.forceslash=true" + - "traefik.http.routers.completion-new-sdk-prompt.middlewares=completion-new-sdk-prompt-strip" + - "traefik.http.services.completion-new-sdk-prompt.loadbalancer.server.port=80" + - "traefik.http.routers.completion-new-sdk-prompt.service=completion-new-sdk-prompt" + +networks: + agenta-network: + external: true diff --git a/services/completion-new-sdk-prompt/entrypoint.sh b/services/completion-new-sdk-prompt/entrypoint.sh new file mode 100755 index 0000000000..e9b7b1d586 --- /dev/null +++ 
b/services/completion-new-sdk-prompt/entrypoint.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env +fi + +# Run uvicorn with reload watching both app and agenta-cli directories + +exec python main.py diff --git a/services/completion-new-sdk-prompt/main.py b/services/completion-new-sdk-prompt/main.py new file mode 100644 index 0000000000..c17d626bc3 --- /dev/null +++ b/services/completion-new-sdk-prompt/main.py @@ -0,0 +1,14 @@ +from uvicorn import run +import agenta +import _app # This will register the routes with the FastAPI application +import os + + +if __name__ == "__main__": + run( + "agenta:app", + host="0.0.0.0", + port=80, + reload=True, + reload_dirs=[".", "/agenta-cli"], + ) diff --git a/services/completion-new-sdk-prompt/main_local.py b/services/completion-new-sdk-prompt/main_local.py new file mode 100644 index 0000000000..16b9f2e9cf --- /dev/null +++ b/services/completion-new-sdk-prompt/main_local.py @@ -0,0 +1,18 @@ +from uvicorn import run +import os + +os.environ["AGENTA_UNAUTHORIZED_EXECUTION_ALLOWED"] = "True" +os.environ["AGENTA_HOST"] = "http://host.docker.internal" + +import agenta +import _app # This will register the routes with the FastAPI application + + +if __name__ == "__main__": + run( + "agenta:app", + host="0.0.0.0", + port=8000, + reload=True, + reload_dirs=[".", "/Users/mahmoudmabrouk/agenta/code/agenta-core/agenta-cli"], + ) diff --git a/services/completion-new-sdk-prompt/mock_litellm.py b/services/completion-new-sdk-prompt/mock_litellm.py new file mode 100644 index 0000000000..a5b57a68cc --- /dev/null +++ b/services/completion-new-sdk-prompt/mock_litellm.py @@ -0,0 +1,53 @@ +from typing import Dict, Any, List +from dataclasses import dataclass + + +@dataclass +class MockUsage: + prompt_tokens: int = 10 + completion_tokens: int = 20 + total_tokens: int = 30 + + def dict(self): + return { + "prompt_tokens": self.prompt_tokens, + "completion_tokens": self.completion_tokens, + "total_tokens": self.total_tokens, + } + 
+ +@dataclass +class MockMessage: + content: str = "This is a mock response from the LLM." + + +@dataclass +class MockChoice: + message: MockMessage = MockMessage() + + +@dataclass +class MockCompletion: + choices: List[MockChoice] = None + usage: MockUsage = None + + def __init__(self): + self.choices = [MockChoice()] + self.usage = MockUsage() + + +class MockLiteLLM: + async def acompletion( + self, + model: str, + messages: List[Dict[str, Any]], + temperature: float, + max_tokens: int = None, + **kwargs + ) -> MockCompletion: + return MockCompletion() + + class cost_calculator: + @staticmethod + def completion_cost(completion_response, model): + return 0.0001 # Mock cost diff --git a/services/completion-new-sdk-prompt/supported_llm_models.py b/services/completion-new-sdk-prompt/supported_llm_models.py new file mode 100644 index 0000000000..c314be0e37 --- /dev/null +++ b/services/completion-new-sdk-prompt/supported_llm_models.py @@ -0,0 +1,91 @@ +supported_llm_models = { + "Mistral AI": [ + "mistral/mistral-tiny", + "mistral/mistral-small", + "mistral/mistral-medium", + "mistral/mistral-large-latest", + ], + "Open AI": [ + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo", + "gpt-4", + "gpt-4o", + "gpt-4-1106-preview", + ], + "Gemini": [ + "gemini/gemini-1.5-pro-latest", + ], + "Cohere": [ + "cohere/command-light", + "cohere/command-r-plus", + "cohere/command-nightly", + ], + "Anthropic": [ + "anthropic/claude-2.1", + "anthropic/claude-2", + "anthropic/claude-instant-1.2", + "anthropic/claude-instant-1", + ], + "Anyscale": [ + "anyscale/meta-llama/Llama-2-13b-chat-hf", + "anyscale/meta-llama/Llama-2-70b-chat-hf", + ], + "Perplexity AI": [ + "perplexity/pplx-7b-chat", + "perplexity/pplx-70b-chat", + "perplexity/pplx-7b-online", + "perplexity/pplx-70b-online", + ], + "DeepInfra": [ + "deepinfra/meta-llama/Llama-2-70b-chat-hf", + "deepinfra/meta-llama/Llama-2-13b-chat-hf", + "deepinfra/codellama/CodeLlama-34b-Instruct-hf", + "deepinfra/mistralai/Mistral-7B-Instruct-v0.1", + 
"deepinfra/jondurbin/airoboros-l2-70b-gpt4-1.4.1", + ], + "Together AI": [ + "together_ai/togethercomputer/llama-2-70b-chat", + "together_ai/togethercomputer/llama-2-70b", + "together_ai/togethercomputer/LLaMA-2-7B-32K", + "together_ai/togethercomputer/Llama-2-7B-32K-Instruct", + "together_ai/togethercomputer/llama-2-7b", + "together_ai/togethercomputer/alpaca-7b", + "together_ai/togethercomputer/CodeLlama-34b-Instruct", + "together_ai/togethercomputer/CodeLlama-34b-Python", + "together_ai/WizardLM/WizardCoder-Python-34B-V1.0", + "together_ai/NousResearch/Nous-Hermes-Llama2-13b", + "together_ai/Austism/chronos-hermes-13b", + ], + "Aleph Alpha": [ + "luminous-base", + "luminous-base-control", + "luminous-extended-control", + "luminous-supreme", + ], + "OpenRouter": [ + "openrouter/openai/gpt-3.5-turbo", + "openrouter/openai/gpt-3.5-turbo-16k", + "openrouter/anthropic/claude-instant-v1", + "openrouter/google/palm-2-chat-bison", + "openrouter/google/palm-2-codechat-bison", + "openrouter/meta-llama/llama-2-13b-chat", + "openrouter/meta-llama/llama-2-70b-chat", + ], + "Groq": [ + "groq/llama3-8b-8192", + "groq/llama3-70b-8192", + "groq/llama2-70b-4096", + "groq/mixtral-8x7b-32768", + "groq/gemma-7b-it", + ], +} + + +def get_all_supported_llm_models(): + """ + Returns a list of evaluators + + Returns: + List[dict]: A list of evaluator dictionaries. 
+ """ + return supported_llm_models diff --git a/services/completion-new-sdk.rest b/services/completion-new-sdk.rest new file mode 100644 index 0000000000..af8ddf26af --- /dev/null +++ b/services/completion-new-sdk.rest @@ -0,0 +1,76 @@ +@baseUrl = http://localhost +@service = completion-live-sdk + +### These request can be run using the Rest Client extension in vsCode + +### Health Check +GET {{baseUrl}}/{{service}}/health HTTP/1.1 + +### OpenAPI +GET {{baseUrl}}/{{service}}/openapi.json HTTP/1.1 + +### Generate +POST {{baseUrl}}/{{service}}/generate HTTP/1.1 +Content-Type: application/json + +{ + "agenta_config": { + "temperature": 0.7, + "model": "gpt-3.5-turbo-16k", + "max_tokens": 256, + "prompt_system": "You are an expert in linguistics.", + "prompt_user": "What is the meaning of {word}?", + "top_p": 0.95, + "frequence_penalty": 0.5, + "presence_penalty": 0.5, + "force_json": true + }, + "inputs": { + "word": "language" + } +} + +### Playground Run +POST {{baseUrl}}/{{service}}/playground/run HTTP/1.1 +Content-Type: application/json + +{ + "agenta_config": { + "temperature": 0.7, + "model": "gpt-3.5-turbo-16k", + "max_tokens": 256, + "prompt_system": "You are an expert in linguistics.", + "prompt_user": "What is the meaning of {word}?", + "top_p": 0.95, + "frequence_penalty": 0.5, + "presence_penalty": 0.5, + "force_json": true + }, + "inputs": { + "word": "language" + } +} + +### Generate Deployed +POST {{baseUrl}}/{{service}}/generate_deployed HTTP/1.1 +Content-Type: application/json + +{ + "inputs": { + "country": "Italy" + }, + "config": "default", + "environment": "production" +} + +### Run +POST {{baseUrl}}/{{service}}/run HTTP/1.1 +Content-Type: application/json + +{ + "inputs": { + "country": "Germany" + }, + "config": "default", + "environment": "production" +} diff --git a/services/completion-new-sdk/Dockerfile b/services/completion-new-sdk/Dockerfile new file mode 100644 index 0000000000..eeda92c313 --- /dev/null +++ 
b/services/completion-new-sdk/Dockerfile @@ -0,0 +1,18 @@ +FROM python:3.10-slim + +ARG ROOT_PATH=/ +ENV ROOT_PATH=${ROOT_PATH} + +WORKDIR /app + +COPY . . + +RUN pip install --upgrade pip \ + && pip install --no-cache-dir agenta openai python-dotenv uvicorn "litellm>=1.0,<2.0" google-generativeai + +# Add agenta-cli to PYTHONPATH so it can find the local agenta package +ENV PYTHONPATH=/agenta-cli:$PYTHONPATH + +EXPOSE 80 + +CMD ["./entrypoint.sh"] diff --git a/services/completion-new-sdk/Dockerfile.prerelease b/services/completion-new-sdk/Dockerfile.prerelease new file mode 100644 index 0000000000..422c537eaf --- /dev/null +++ b/services/completion-new-sdk/Dockerfile.prerelease @@ -0,0 +1,16 @@ +FROM python:3.10-slim + +ARG ROOT_PATH=/ +ENV ROOT_PATH=${ROOT_PATH} + +WORKDIR /app + +COPY . . + +RUN pip install --upgrade pip \ + && pip install --no-cache-dir openai python-dotenv uvicorn "litellm>=1.0,<2.0" google-generativeai \ + && pip install --no-cache-dir --pre agenta + +EXPOSE 80 + +CMD ["./entrypoint.sh"] diff --git a/services/completion-new-sdk/_app.py b/services/completion-new-sdk/_app.py new file mode 100644 index 0000000000..60cef2965c --- /dev/null +++ b/services/completion-new-sdk/_app.py @@ -0,0 +1,113 @@ +from typing import Annotated + +import agenta as ag +from agenta.sdk.assets import supported_llm_models +from pydantic import BaseModel, Field + +import os +# Import mock if MOCK_LLM environment variable is set +if os.getenv("MOCK_LLM", True): + from mock_litellm import MockLiteLLM + + litellm = MockLiteLLM() +else: + import litellm + + litellm.drop_params = True + litellm.callbacks = [ag.callbacks.litellm_handler()] + + +prompts = { + "system_prompt": "You are an expert in geography.", + "user_prompt": """What is the capital of {country}?""", +} + +GPT_FORMAT_RESPONSE = ["gpt-3.5-turbo-1106", "gpt-4-1106-preview"] + + +ag.init() + + +class MyConfig(BaseModel): + temperature: float = Field(default=1, ge=0.0, le=2.0) + model: Annotated[str, 
ag.MultipleChoice(choices=supported_llm_models)] = Field( + default="gpt-3.5-turbo" + ) + max_tokens: int = Field(default=-1, ge=-1, le=4000) + prompt_system: str = Field(default=prompts["system_prompt"]) + prompt_user: str = Field(default=prompts["user_prompt"]) + top_p: float = Field(default=1) + frequence_penalty: float = Field(default=0.0, ge=-2.0, le=2.0) + presence_penalty: float = Field(default=0.0, ge=-2.0, le=2.0) + force_json: bool = Field(default=False) + + +@ag.instrument(spankind="llm") +async def llm_call(prompt_system: str, prompt_user: str): + config = ag.ConfigManager.get_from_route(schema=MyConfig) + response_format = ( + {"type": "json_object"} + if config.force_json and config.model in GPT_FORMAT_RESPONSE + else {"type": "text"} + ) + + max_tokens = config.max_tokens if config.max_tokens != -1 else None + + # Include frequency_penalty and presence_penalty only if supported + completion_params = {} + if config.model in GPT_FORMAT_RESPONSE: + completion_params["frequency_penalty"] = config.frequence_penalty + completion_params["presence_penalty"] = config.presence_penalty + + response = await litellm.acompletion( + **{ + "model": config.model, + "messages": [ + {"content": prompt_system, "role": "system"}, + {"content": prompt_user, "role": "user"}, + ], + "temperature": config.temperature, + "max_tokens": max_tokens, + "top_p": config.top_p, + "response_format": response_format, + **completion_params, + } + ) + token_usage = response.usage.dict() + return { + "message": response.choices[0].message.content, + "usage": token_usage, + "cost": litellm.cost_calculator.completion_cost( + completion_response=response, model=config.model + ), + } + + +@ag.route("/", config_schema=MyConfig) +@ag.instrument() +async def generate( + inputs: ag.DictInput = ag.DictInput(default_keys=["country"]), +): + config = ag.ConfigManager.get_from_route(schema=MyConfig) + print("popo", config) + try: + prompt_user = config.prompt_user.format(**inputs) + except Exception 
as e: + prompt_user = config.prompt_user + try: + prompt_system = config.prompt_system.format(**inputs) + except Exception as e: + prompt_system = config.prompt_system + + # SET MAX TOKENS - via completion() + if config.force_json and config.model not in GPT_FORMAT_RESPONSE: + raise ValueError( + "Model {} does not support JSON response format".format(config.model) + ) + + response = await llm_call(prompt_system=prompt_system, prompt_user=prompt_user) + return { + "message": response["message"], + "usage": response.get("usage", None), + "cost": response.get("cost", None), + } diff --git a/services/completion-new-sdk/docker-compose.yml b/services/completion-new-sdk/docker-compose.yml new file mode 100644 index 0000000000..c116912817 --- /dev/null +++ b/services/completion-new-sdk/docker-compose.yml @@ -0,0 +1,23 @@ +services: + completion-new-sdk: + build: . + volumes: + - .:/app + - ../../agenta-cli:/agenta-cli + environment: + - AGENTA_UNAUTHORIZED_EXECUTION_ALLOWED=True + - AGENTA_HOST=http://host.docker.internal + networks: + - agenta-network + labels: + - "traefik.http.routers.completion-new-sdk.rule=PathPrefix(`/completion-new-sdk/`)" + - "traefik.http.routers.completion-new-sdk.entrypoints=web" + - "traefik.http.middlewares.completion-new-sdk-strip.stripprefix.prefixes=/completion-new-sdk" + - "traefik.http.middlewares.completion-new-sdk-strip.stripprefix.forceslash=true" + - "traefik.http.routers.completion-new-sdk.middlewares=completion-new-sdk-strip" + - "traefik.http.services.completion-new-sdk.loadbalancer.server.port=80" + - "traefik.http.routers.completion-new-sdk.service=completion-new-sdk" + +networks: + agenta-network: + external: true diff --git a/services/completion-new-sdk/entrypoint.sh b/services/completion-new-sdk/entrypoint.sh new file mode 100755 index 0000000000..e9b7b1d586 --- /dev/null +++ b/services/completion-new-sdk/entrypoint.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env +fi + +# Run uvicorn with reload watching 
both app and agenta-cli directories + +exec python main.py diff --git a/services/completion-new-sdk/main.py b/services/completion-new-sdk/main.py new file mode 100644 index 0000000000..c17d626bc3 --- /dev/null +++ b/services/completion-new-sdk/main.py @@ -0,0 +1,14 @@ +from uvicorn import run +import agenta +import _app # This will register the routes with the FastAPI application +import os + + +if __name__ == "__main__": + run( + "agenta:app", + host="0.0.0.0", + port=80, + reload=True, + reload_dirs=[".", "/agenta-cli"], + ) diff --git a/services/completion-new-sdk/mock_litellm.py b/services/completion-new-sdk/mock_litellm.py new file mode 100644 index 0000000000..a5b57a68cc --- /dev/null +++ b/services/completion-new-sdk/mock_litellm.py @@ -0,0 +1,53 @@ +from typing import Dict, Any, List +from dataclasses import dataclass + + +@dataclass +class MockUsage: + prompt_tokens: int = 10 + completion_tokens: int = 20 + total_tokens: int = 30 + + def dict(self): + return { + "prompt_tokens": self.prompt_tokens, + "completion_tokens": self.completion_tokens, + "total_tokens": self.total_tokens, + } + + +@dataclass +class MockMessage: + content: str = "This is a mock response from the LLM." 
+ + +@dataclass +class MockChoice: + message: MockMessage = MockMessage() + + +@dataclass +class MockCompletion: + choices: List[MockChoice] = None + usage: MockUsage = None + + def __init__(self): + self.choices = [MockChoice()] + self.usage = MockUsage() + + +class MockLiteLLM: + async def acompletion( + self, + model: str, + messages: List[Dict[str, Any]], + temperature: float, + max_tokens: int = None, + **kwargs + ) -> MockCompletion: + return MockCompletion() + + class cost_calculator: + @staticmethod + def completion_cost(completion_response, model): + return 0.0001 # Mock cost diff --git a/services/completion-new-sdk/supported_llm_models.py b/services/completion-new-sdk/supported_llm_models.py new file mode 100644 index 0000000000..c314be0e37 --- /dev/null +++ b/services/completion-new-sdk/supported_llm_models.py @@ -0,0 +1,91 @@ +supported_llm_models = { + "Mistral AI": [ + "mistral/mistral-tiny", + "mistral/mistral-small", + "mistral/mistral-medium", + "mistral/mistral-large-latest", + ], + "Open AI": [ + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo", + "gpt-4", + "gpt-4o", + "gpt-4-1106-preview", + ], + "Gemini": [ + "gemini/gemini-1.5-pro-latest", + ], + "Cohere": [ + "cohere/command-light", + "cohere/command-r-plus", + "cohere/command-nightly", + ], + "Anthropic": [ + "anthropic/claude-2.1", + "anthropic/claude-2", + "anthropic/claude-instant-1.2", + "anthropic/claude-instant-1", + ], + "Anyscale": [ + "anyscale/meta-llama/Llama-2-13b-chat-hf", + "anyscale/meta-llama/Llama-2-70b-chat-hf", + ], + "Perplexity AI": [ + "perplexity/pplx-7b-chat", + "perplexity/pplx-70b-chat", + "perplexity/pplx-7b-online", + "perplexity/pplx-70b-online", + ], + "DeepInfra": [ + "deepinfra/meta-llama/Llama-2-70b-chat-hf", + "deepinfra/meta-llama/Llama-2-13b-chat-hf", + "deepinfra/codellama/CodeLlama-34b-Instruct-hf", + "deepinfra/mistralai/Mistral-7B-Instruct-v0.1", + "deepinfra/jondurbin/airoboros-l2-70b-gpt4-1.4.1", + ], + "Together AI": [ + 
"together_ai/togethercomputer/llama-2-70b-chat", + "together_ai/togethercomputer/llama-2-70b", + "together_ai/togethercomputer/LLaMA-2-7B-32K", + "together_ai/togethercomputer/Llama-2-7B-32K-Instruct", + "together_ai/togethercomputer/llama-2-7b", + "together_ai/togethercomputer/alpaca-7b", + "together_ai/togethercomputer/CodeLlama-34b-Instruct", + "together_ai/togethercomputer/CodeLlama-34b-Python", + "together_ai/WizardLM/WizardCoder-Python-34B-V1.0", + "together_ai/NousResearch/Nous-Hermes-Llama2-13b", + "together_ai/Austism/chronos-hermes-13b", + ], + "Aleph Alpha": [ + "luminous-base", + "luminous-base-control", + "luminous-extended-control", + "luminous-supreme", + ], + "OpenRouter": [ + "openrouter/openai/gpt-3.5-turbo", + "openrouter/openai/gpt-3.5-turbo-16k", + "openrouter/anthropic/claude-instant-v1", + "openrouter/google/palm-2-chat-bison", + "openrouter/google/palm-2-codechat-bison", + "openrouter/meta-llama/llama-2-13b-chat", + "openrouter/meta-llama/llama-2-70b-chat", + ], + "Groq": [ + "groq/llama3-8b-8192", + "groq/llama3-70b-8192", + "groq/llama2-70b-4096", + "groq/mixtral-8x7b-32768", + "groq/gemma-7b-it", + ], +} + + +def get_all_supported_llm_models(): + """ + Returns a list of evaluators + + Returns: + List[dict]: A list of evaluator dictionaries. 
+ """ + return supported_llm_models diff --git a/services/completion-old-sdk.rest b/services/completion-old-sdk.rest new file mode 100644 index 0000000000..e817f9c037 --- /dev/null +++ b/services/completion-old-sdk.rest @@ -0,0 +1,72 @@ +@baseUrl = http://localhost +@service = completion-old-sdk + +### These request can be run using the Rest Client extension in vsCode + +### Health Check +GET {{baseUrl}}/{{service}}/health HTTP/1.1 + +### OpenAPI +GET {{baseUrl}}/{{service}}/openapi.json HTTP/1.1 + +### Generate +POST {{baseUrl}}/{{service}}/generate HTTP/1.1 +Content-Type: application/json + +{ + "temperature": 1.0, + "model": "gpt-3.5-turbo", + "max_tokens": -1, + "prompt_system": "You are an expert in geography.", + "prompt_user": "What is the capital of {country}?", + "top_p": 1.0, + "frequence_penalty": 0.0, + "presence_penalty": 0.0, + "force_json": false, + "inputs": { + "country": "France" + } +} + +### Playground Run +POST {{baseUrl}}/{{service}}/playground/run HTTP/1.1 +Content-Type: application/json + +{ + "temperature": 1.0, + "model": "gpt-3.5-turbo", + "max_tokens": -1, + "prompt_system": "You are an expert in geography.", + "prompt_user": "What is the capital of {country}?", + "top_p": 1.0, + "frequence_penalty": 0.0, + "presence_penalty": 0.0, + "force_json": false, + "inputs": { + "country": "Spain" + } +} + +### Generate Deployed +POST {{baseUrl}}/{{service}}/generate_deployed HTTP/1.1 +Content-Type: application/json + +{ + "inputs": { + "country": "Italy" + }, + "config": "default", + "environment": "production" +} + +### Run +POST {{baseUrl}}/{{service}}/run HTTP/1.1 +Content-Type: application/json + +{ + "inputs": { + "country": "Germany" + }, + "config": "default", + "environment": "production" +} diff --git a/services/completion-old-sdk/Dockerfile b/services/completion-old-sdk/Dockerfile new file mode 100644 index 0000000000..d1b049161a --- /dev/null +++ b/services/completion-old-sdk/Dockerfile @@ -0,0 +1,16 @@ +FROM python:3.10-slim + +ARG 
ROOT_PATH=/ +ENV ROOT_PATH=${ROOT_PATH} + +WORKDIR /app + +COPY . . + +RUN pip install --upgrade pip \ + && pip install --no-cache-dir agenta openai python-dotenv uvicorn "litellm>=1.0,<2.0" google-generativeai + + +EXPOSE 80 + +CMD ["./entrypoint.sh"] diff --git a/services/completion-old-sdk/Dockerfile.prerelease b/services/completion-old-sdk/Dockerfile.prerelease new file mode 100644 index 0000000000..422c537eaf --- /dev/null +++ b/services/completion-old-sdk/Dockerfile.prerelease @@ -0,0 +1,16 @@ +FROM python:3.10-slim + +ARG ROOT_PATH=/ +ENV ROOT_PATH=${ROOT_PATH} + +WORKDIR /app + +COPY . . + +RUN pip install --upgrade pip \ + && pip install --no-cache-dir openai python-dotenv uvicorn "litellm>=1.0,<2.0" google-generativeai \ + && pip install --no-cache-dir --pre agenta + +EXPOSE 80 + +CMD ["./entrypoint.sh"] diff --git a/services/completion-old-sdk/_app.py b/services/completion-old-sdk/_app.py new file mode 100644 index 0000000000..430aed8434 --- /dev/null +++ b/services/completion-old-sdk/_app.py @@ -0,0 +1,106 @@ +import agenta as ag +from supported_llm_models import get_all_supported_llm_models + +import os + +# Import mock if MOCK_LLM environment variable is set +if os.getenv("MOCK_LLM", True): + from mock_litellm import MockLiteLLM + + litellm = MockLiteLLM() +else: + import litellm + + litellm.drop_params = True + litellm.callbacks = [ag.callbacks.litellm_handler()] + + +prompts = { + "system_prompt": "You are an expert in geography.", + "user_prompt": """What is the capital of {country}?""", +} + +GPT_FORMAT_RESPONSE = ["gpt-3.5-turbo-1106", "gpt-4-1106-preview"] + + +ag.init() +ag.config.default( + temperature=ag.FloatParam(default=1, minval=0.0, maxval=2.0), + model=ag.GroupedMultipleChoiceParam( + default="gpt-3.5-turbo", choices=get_all_supported_llm_models() + ), + max_tokens=ag.IntParam(-1, -1, 4000), + prompt_system=ag.TextParam(prompts["system_prompt"]), + prompt_user=ag.TextParam(prompts["user_prompt"]), + top_p=ag.FloatParam(1), + 
frequence_penalty=ag.FloatParam(default=0.0, minval=-2.0, maxval=2.0), + presence_penalty=ag.FloatParam(default=0.0, minval=-2.0, maxval=2.0), + force_json=ag.BinaryParam(False), +) + + +@ag.instrument(spankind="llm") +async def llm_call(prompt_system: str, prompt_user: str): + response_format = ( + {"type": "json_object"} + if ag.config.force_json and ag.config.model in GPT_FORMAT_RESPONSE + else {"type": "text"} + ) + max_tokens = ag.config.max_tokens if ag.config.max_tokens != -1 else None + + # Include frequency_penalty and presence_penalty only if supported + completion_params = {} + if ag.config.model in GPT_FORMAT_RESPONSE: + completion_params["frequency_penalty"] = ag.config.frequence_penalty + completion_params["presence_penalty"] = ag.config.presence_penalty + + response = await litellm.acompletion( + **{ + "model": ag.config.model, + "messages": [ + {"content": prompt_system, "role": "system"}, + {"content": prompt_user, "role": "user"}, + ], + "temperature": ag.config.temperature, + "max_tokens": max_tokens, + "top_p": ag.config.top_p, + "response_format": response_format, + **completion_params, + } + ) + token_usage = response.usage.dict() + return { + "message": response.choices[0].message.content, + "usage": token_usage, + "cost": litellm.cost_calculator.completion_cost( + completion_response=response, model=ag.config.model + ), + } + + +@ag.entrypoint +@ag.instrument() +async def generate( + inputs: ag.DictInput = ag.DictInput(default_keys=["country"]), +): + try: + prompt_user = ag.config.prompt_user.format(**inputs) + except Exception as e: + prompt_user = ag.config.prompt_user + try: + prompt_system = ag.config.prompt_system.format(**inputs) + except Exception as e: + prompt_system = ag.config.prompt_system + + # SET MAX TOKENS - via completion() + if ag.config.force_json and ag.config.model not in GPT_FORMAT_RESPONSE: + raise ValueError( + "Model {} does not support JSON response format".format(ag.config.model) + ) + + response = await 
llm_call(prompt_system=prompt_system, prompt_user=prompt_user) + return { + "message": response["message"], + "usage": response.get("usage", None), + "cost": response.get("cost", None), + } diff --git a/services/completion-old-sdk/docker-compose.yml b/services/completion-old-sdk/docker-compose.yml new file mode 100644 index 0000000000..c07edac639 --- /dev/null +++ b/services/completion-old-sdk/docker-compose.yml @@ -0,0 +1,22 @@ +services: + completion-old-sdk: + build: . + volumes: + - .:/app + environment: + - AGENTA_UNAUTHORIZED_EXECUTION_ALLOWED=True + - AGENTA_HOST=http://host.docker.internal + networks: + - agenta-network + labels: + - "traefik.http.routers.completion-old-sdk.rule=PathPrefix(`/completion-old-sdk/`)" + - "traefik.http.routers.completion-old-sdk.entrypoints=web" + - "traefik.http.middlewares.completion-old-sdk-strip.stripprefix.prefixes=/completion-old-sdk" + - "traefik.http.middlewares.completion-old-sdk-strip.stripprefix.forceslash=true" + - "traefik.http.routers.completion-old-sdk.middlewares=completion-old-sdk-strip" + - "traefik.http.services.completion-old-sdk.loadbalancer.server.port=80" + - "traefik.http.routers.completion-old-sdk.service=completion-old-sdk" + +networks: + agenta-network: + external: true diff --git a/services/completion-old-sdk/entrypoint.sh b/services/completion-old-sdk/entrypoint.sh new file mode 100755 index 0000000000..3c6b353144 --- /dev/null +++ b/services/completion-old-sdk/entrypoint.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env +fi + +exec python main.py diff --git a/services/completion-old-sdk/main.py b/services/completion-old-sdk/main.py new file mode 100644 index 0000000000..81d1b6c73d --- /dev/null +++ b/services/completion-old-sdk/main.py @@ -0,0 +1,8 @@ +from uvicorn import run +import agenta +import _app # This will register the routes with the FastAPI application +import os + + +if __name__ == "__main__": + run("agenta:app", host="0.0.0.0", port=80, reload=True) diff --git 
a/services/completion-old-sdk/mock_litellm.py b/services/completion-old-sdk/mock_litellm.py new file mode 100644 index 0000000000..a5b57a68cc --- /dev/null +++ b/services/completion-old-sdk/mock_litellm.py @@ -0,0 +1,53 @@ +from typing import Dict, Any, List +from dataclasses import dataclass + + +@dataclass +class MockUsage: + prompt_tokens: int = 10 + completion_tokens: int = 20 + total_tokens: int = 30 + + def dict(self): + return { + "prompt_tokens": self.prompt_tokens, + "completion_tokens": self.completion_tokens, + "total_tokens": self.total_tokens, + } + + +@dataclass +class MockMessage: + content: str = "This is a mock response from the LLM." + + +@dataclass +class MockChoice: + message: MockMessage = MockMessage() + + +@dataclass +class MockCompletion: + choices: List[MockChoice] = None + usage: MockUsage = None + + def __init__(self): + self.choices = [MockChoice()] + self.usage = MockUsage() + + +class MockLiteLLM: + async def acompletion( + self, + model: str, + messages: List[Dict[str, Any]], + temperature: float, + max_tokens: int = None, + **kwargs + ) -> MockCompletion: + return MockCompletion() + + class cost_calculator: + @staticmethod + def completion_cost(completion_response, model): + return 0.0001 # Mock cost diff --git a/services/completion-old-sdk/supported_llm_models.py b/services/completion-old-sdk/supported_llm_models.py new file mode 100644 index 0000000000..c314be0e37 --- /dev/null +++ b/services/completion-old-sdk/supported_llm_models.py @@ -0,0 +1,91 @@ +supported_llm_models = { + "Mistral AI": [ + "mistral/mistral-tiny", + "mistral/mistral-small", + "mistral/mistral-medium", + "mistral/mistral-large-latest", + ], + "Open AI": [ + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo", + "gpt-4", + "gpt-4o", + "gpt-4-1106-preview", + ], + "Gemini": [ + "gemini/gemini-1.5-pro-latest", + ], + "Cohere": [ + "cohere/command-light", + "cohere/command-r-plus", + "cohere/command-nightly", + ], + "Anthropic": [ + "anthropic/claude-2.1", + 
"anthropic/claude-2", + "anthropic/claude-instant-1.2", + "anthropic/claude-instant-1", + ], + "Anyscale": [ + "anyscale/meta-llama/Llama-2-13b-chat-hf", + "anyscale/meta-llama/Llama-2-70b-chat-hf", + ], + "Perplexity AI": [ + "perplexity/pplx-7b-chat", + "perplexity/pplx-70b-chat", + "perplexity/pplx-7b-online", + "perplexity/pplx-70b-online", + ], + "DeepInfra": [ + "deepinfra/meta-llama/Llama-2-70b-chat-hf", + "deepinfra/meta-llama/Llama-2-13b-chat-hf", + "deepinfra/codellama/CodeLlama-34b-Instruct-hf", + "deepinfra/mistralai/Mistral-7B-Instruct-v0.1", + "deepinfra/jondurbin/airoboros-l2-70b-gpt4-1.4.1", + ], + "Together AI": [ + "together_ai/togethercomputer/llama-2-70b-chat", + "together_ai/togethercomputer/llama-2-70b", + "together_ai/togethercomputer/LLaMA-2-7B-32K", + "together_ai/togethercomputer/Llama-2-7B-32K-Instruct", + "together_ai/togethercomputer/llama-2-7b", + "together_ai/togethercomputer/alpaca-7b", + "together_ai/togethercomputer/CodeLlama-34b-Instruct", + "together_ai/togethercomputer/CodeLlama-34b-Python", + "together_ai/WizardLM/WizardCoder-Python-34B-V1.0", + "together_ai/NousResearch/Nous-Hermes-Llama2-13b", + "together_ai/Austism/chronos-hermes-13b", + ], + "Aleph Alpha": [ + "luminous-base", + "luminous-base-control", + "luminous-extended-control", + "luminous-supreme", + ], + "OpenRouter": [ + "openrouter/openai/gpt-3.5-turbo", + "openrouter/openai/gpt-3.5-turbo-16k", + "openrouter/anthropic/claude-instant-v1", + "openrouter/google/palm-2-chat-bison", + "openrouter/google/palm-2-codechat-bison", + "openrouter/meta-llama/llama-2-13b-chat", + "openrouter/meta-llama/llama-2-70b-chat", + ], + "Groq": [ + "groq/llama3-8b-8192", + "groq/llama3-70b-8192", + "groq/llama2-70b-4096", + "groq/mixtral-8x7b-32768", + "groq/gemma-7b-it", + ], +} + + +def get_all_supported_llm_models(): + """ + Returns a list of evaluators + + Returns: + List[dict]: A list of evaluator dictionaries. 
+ """ + return supported_llm_models diff --git a/services/completion-stateless-sdk.rest b/services/completion-stateless-sdk.rest new file mode 100644 index 0000000000..8867777c14 --- /dev/null +++ b/services/completion-stateless-sdk.rest @@ -0,0 +1,91 @@ +@baseUrl = http://localhost +@service = completion-stateless-sdk + +### These request can be run using the Rest Client extension in vsCode + +### Health Check +GET {{baseUrl}}/{{service}}/health HTTP/1.1 + +### OpenAPI +GET {{baseUrl}}/{{service}}/openapi.json HTTP/1.1 + +### Generate +POST {{baseUrl}}/{{service}}/generate HTTP/1.1 +Content-Type: application/json + +{ + "temperature": 1.0, + "model": "gpt-3.5-turbo", + "max_tokens": -1, + "prompt_system": "You are an expert in geography.", + "prompt_user": "What is the capital of {country}?", + "top_p": 1.0, + "frequence_penalty": 0.0, + "presence_penalty": 0.0, + "force_json": false, + "inputs": { + "country": "France" + } +} + +### Test +POST {{baseUrl}}/{{service}}/test HTTP/1.1 +Content-Type: application/json + +{ + "temperature": 1.0, + "model": "gpt-3.5-turbo", + "max_tokens": -1, + "prompt_system": "You are an expert in geography.", + "prompt_user": "What is the capital of {country}?", + "top_p": 1.0, + "frequence_penalty": 0.0, + "presence_penalty": 0.0, + "force_json": false, + "inputs": { + "country": "France" + } +} + +### Playground Run +POST {{baseUrl}}/{{service}}/playground/run HTTP/1.1 +Content-Type: application/json + +{ + "temperature": 1.0, + "model": "gpt-3.5-turbo", + "max_tokens": -1, + "prompt_system": "You are an expert in geography.", + "prompt_user": "What is the capital of {country}?", + "top_p": 1.0, + "frequence_penalty": 0.0, + "presence_penalty": 0.0, + "force_json": false, + "inputs": { + "country": "Spain" + } +} + +### Generate Deployed +POST {{baseUrl}}/{{service}}/generate_deployed HTTP/1.1 +Content-Type: application/json + +{ + "inputs": { + "country": "Italy" + }, + "config": "default", + "environment": "production" +} + +### 
Run +POST {{baseUrl}}/{{service}}/run HTTP/1.1 +Content-Type: application/json + +{ + "inputs": { + "country": "Germany" + }, + "config": "default", + "environment": "production" +} diff --git a/services/completion-stateless-sdk/Dockerfile b/services/completion-stateless-sdk/Dockerfile new file mode 100644 index 0000000000..d808e99946 --- /dev/null +++ b/services/completion-stateless-sdk/Dockerfile @@ -0,0 +1,16 @@ +FROM python:3.10-slim + +ARG ROOT_PATH=/ +ENV ROOT_PATH=${ROOT_PATH} + +WORKDIR /app + +COPY . . + +RUN pip install --upgrade pip \ + && pip install --no-cache-dir openai python-dotenv uvicorn "litellm>=1.0,<2.0" google-generativeai agenta + + +EXPOSE 80 + +CMD ["./entrypoint.sh"] diff --git a/services/completion-stateless-sdk/Dockerfile.prerelease b/services/completion-stateless-sdk/Dockerfile.prerelease new file mode 100644 index 0000000000..422c537eaf --- /dev/null +++ b/services/completion-stateless-sdk/Dockerfile.prerelease @@ -0,0 +1,16 @@ +FROM python:3.10-slim + +ARG ROOT_PATH=/ +ENV ROOT_PATH=${ROOT_PATH} + +WORKDIR /app + +COPY . . 
+ +RUN pip install --upgrade pip \ + && pip install --no-cache-dir openai python-dotenv uvicorn "litellm>=1.0,<2.0" google-generativeai \ + && pip install --no-cache-dir --pre agenta + +EXPOSE 80 + +CMD ["./entrypoint.sh"] diff --git a/services/completion-stateless-sdk/_app.py b/services/completion-stateless-sdk/_app.py new file mode 100644 index 0000000000..430aed8434 --- /dev/null +++ b/services/completion-stateless-sdk/_app.py @@ -0,0 +1,106 @@ +import agenta as ag +from supported_llm_models import get_all_supported_llm_models + +import os + +# Import mock if MOCK_LLM environment variable is set +if os.getenv("MOCK_LLM", True): + from mock_litellm import MockLiteLLM + + litellm = MockLiteLLM() +else: + import litellm + + litellm.drop_params = True + litellm.callbacks = [ag.callbacks.litellm_handler()] + + +prompts = { + "system_prompt": "You are an expert in geography.", + "user_prompt": """What is the capital of {country}?""", +} + +GPT_FORMAT_RESPONSE = ["gpt-3.5-turbo-1106", "gpt-4-1106-preview"] + + +ag.init() +ag.config.default( + temperature=ag.FloatParam(default=1, minval=0.0, maxval=2.0), + model=ag.GroupedMultipleChoiceParam( + default="gpt-3.5-turbo", choices=get_all_supported_llm_models() + ), + max_tokens=ag.IntParam(-1, -1, 4000), + prompt_system=ag.TextParam(prompts["system_prompt"]), + prompt_user=ag.TextParam(prompts["user_prompt"]), + top_p=ag.FloatParam(1), + frequence_penalty=ag.FloatParam(default=0.0, minval=-2.0, maxval=2.0), + presence_penalty=ag.FloatParam(default=0.0, minval=-2.0, maxval=2.0), + force_json=ag.BinaryParam(False), +) + + +@ag.instrument(spankind="llm") +async def llm_call(prompt_system: str, prompt_user: str): + response_format = ( + {"type": "json_object"} + if ag.config.force_json and ag.config.model in GPT_FORMAT_RESPONSE + else {"type": "text"} + ) + max_tokens = ag.config.max_tokens if ag.config.max_tokens != -1 else None + + # Include frequency_penalty and presence_penalty only if supported + completion_params = {} + 
if ag.config.model in GPT_FORMAT_RESPONSE: + completion_params["frequency_penalty"] = ag.config.frequence_penalty + completion_params["presence_penalty"] = ag.config.presence_penalty + + response = await litellm.acompletion( + **{ + "model": ag.config.model, + "messages": [ + {"content": prompt_system, "role": "system"}, + {"content": prompt_user, "role": "user"}, + ], + "temperature": ag.config.temperature, + "max_tokens": max_tokens, + "top_p": ag.config.top_p, + "response_format": response_format, + **completion_params, + } + ) + token_usage = response.usage.dict() + return { + "message": response.choices[0].message.content, + "usage": token_usage, + "cost": litellm.cost_calculator.completion_cost( + completion_response=response, model=ag.config.model + ), + } + + +@ag.entrypoint +@ag.instrument() +async def generate( + inputs: ag.DictInput = ag.DictInput(default_keys=["country"]), +): + try: + prompt_user = ag.config.prompt_user.format(**inputs) + except Exception as e: + prompt_user = ag.config.prompt_user + try: + prompt_system = ag.config.prompt_system.format(**inputs) + except Exception as e: + prompt_system = ag.config.prompt_system + + # SET MAX TOKENS - via completion() + if ag.config.force_json and ag.config.model not in GPT_FORMAT_RESPONSE: + raise ValueError( + "Model {} does not support JSON response format".format(ag.config.model) + ) + + response = await llm_call(prompt_system=prompt_system, prompt_user=prompt_user) + return { + "message": response["message"], + "usage": response.get("usage", None), + "cost": response.get("cost", None), + } diff --git a/services/completion-stateless-sdk/agenta/__init__.py b/services/completion-stateless-sdk/agenta/__init__.py new file mode 100644 index 0000000000..53600a4a1e --- /dev/null +++ b/services/completion-stateless-sdk/agenta/__init__.py @@ -0,0 +1,74 @@ +from typing import Any, Callable, Optional + +from .sdk.utils.preinit import PreInitObject + +import agenta.client.backend.types as client_types # 
pylint: disable=wrong-import-order + +from .sdk.types import ( + DictInput, + MultipleChoice, + FloatParam, + InFile, + IntParam, + MultipleChoiceParam, + GroupedMultipleChoiceParam, + MessagesInput, + TextParam, + FileInputURL, + BinaryParam, + Prompt, +) + +from .sdk.utils.logging import log as logging +from .sdk.tracing import Tracing, get_tracer +from .sdk.decorators.tracing import instrument +from .sdk.tracing.conventions import Reference +from .sdk.decorators.routing import entrypoint, app, route +from .sdk.agenta_init import Config, AgentaSingleton, init as _init +from .sdk.utils.costs import calculate_token_usage +from .sdk.client import Agenta +from .sdk.litellm import litellm as callbacks +from .sdk.managers.vault import VaultManager +from .sdk.managers.config import ConfigManager +from .sdk.managers.variant import VariantManager +from .sdk.managers.deployment import DeploymentManager +from .sdk import assets as assets +from .sdk import tracer + +config = PreInitObject("agenta.config", Config) +DEFAULT_AGENTA_SINGLETON_INSTANCE = AgentaSingleton() + +types = client_types + +api = None +async_api = None + +tracing = DEFAULT_AGENTA_SINGLETON_INSTANCE.tracing # type: ignore +tracer = get_tracer(tracing) + + +def init( + host: Optional[str] = None, + api_key: Optional[str] = None, + config_fname: Optional[str] = None, + redact: Optional[Callable[..., Any]] = None, + redact_on_error: Optional[bool] = True, + # DEPRECATING + app_id: Optional[str] = None, +): + global api, async_api, tracing, tracer # pylint: disable=global-statement + + _init( + host=host, + api_key=api_key, + config_fname=config_fname, + redact=redact, + redact_on_error=redact_on_error, + app_id=app_id, + ) + + api = DEFAULT_AGENTA_SINGLETON_INSTANCE.api # type: ignore + async_api = DEFAULT_AGENTA_SINGLETON_INSTANCE.async_api # type: ignore + + tracing = DEFAULT_AGENTA_SINGLETON_INSTANCE.tracing # type: ignore + tracer = get_tracer(tracing) diff --git 
a/services/completion-stateless-sdk/agenta/cli/evaluation_commands.py b/services/completion-stateless-sdk/agenta/cli/evaluation_commands.py new file mode 100644 index 0000000000..76e00f9694 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/cli/evaluation_commands.py @@ -0,0 +1,22 @@ +import click +from agenta.client import client + + +@click.group() +def evaluation(): + """Commands for evaluations.""" + pass + + +# TODO: Remove hardcoded values +@evaluation.command(name="run") +def run_evaluation_cli(): + """Run an evaluation.""" + + try: + client.run_evaluation( + app_name="sss", + host="http://localhost", + ) + except Exception as ex: + click.echo(click.style(f"Error while running evaluation: {ex}", fg="red")) diff --git a/services/completion-stateless-sdk/agenta/cli/helper.py b/services/completion-stateless-sdk/agenta/cli/helper.py new file mode 100644 index 0000000000..15c945218d --- /dev/null +++ b/services/completion-stateless-sdk/agenta/cli/helper.py @@ -0,0 +1,202 @@ +import os +import sys +import toml +import click +import questionary +from pathlib import Path +from typing import Any, List, MutableMapping +from agenta.client.api_models import AppVariant + + +from typing import Any, Optional +from pathlib import Path +import toml + +from agenta.client.backend.client import AgentaApi + +BACKEND_URL_SUFFIX = os.environ.get("BACKEND_URL_SUFFIX", "api") +POSTHOG_KEY = os.environ.get( + "POSTHOG_KEY", "phc_hmVSxIjTW1REBHXgj2aw4HW9X6CXb6FzerBgP9XenC7" +) + + +def get_global_config(var_name: str) -> Optional[Any]: + """ + Get the value of a global configuration variable. 
+ + Args: + var_name: the name of the variable to get + + Returns: + the value of the variable, or None if it doesn't exist + """ + agenta_dir = Path.home() / ".agenta" + if not agenta_dir.exists(): + return None + agenta_config_file = agenta_dir / "config.toml" + if not agenta_config_file.exists(): + return None + global_config = toml.load(agenta_config_file) + if var_name not in global_config: + return None + return global_config[var_name] + + +def set_global_config(var_name: str, var_value: Any) -> None: + """ + Set the value of a global configuration variable. + + Args: + var_name: the name of the variable to set + var_value: the value to set the variable to + """ + agenta_dir = Path.home() / ".agenta" + if not agenta_dir.exists(): + agenta_dir.mkdir(exist_ok=True) + agenta_config_file = agenta_dir / "config.toml" + if not agenta_config_file.exists(): + config = {} + with agenta_config_file.open("w") as config_file: + toml.dump(config, config_file) + global_config = toml.load(agenta_config_file) + global_config[var_name] = var_value + with open(agenta_config_file, "w") as config_file: + toml.dump(global_config, config_file) + + +def get_api_key(backend_host: str) -> str: + """ + Retrieve or request the API key for accessing the Agenta platform. + + This function first looks for an existing API key in the global config file. + If found, it prompts the user to confirm whether they'd like to use that key. + If not found, it asks the user to input a new key. + + Args: + backend_host (str): The URL of the backend host. + + Returns: + str: The API key to be used for accessing the Agenta platform. + + Raises: + SystemExit: If the user cancels the input by pressing Ctrl+C. + """ + + api_key = get_global_config("api_key") + if api_key: + # API key exists in the config file, ask for confirmation + confirm_api_key = questionary.confirm( + f"API Key found: {api_key}\nDo you want to use this API Key?" 
+ ).ask() + + if confirm_api_key: + return api_key + elif confirm_api_key is None: # User pressed Ctrl+C + sys.exit(0) + + api_key = questionary.text( + f"(You can get your API Key here: {backend_host}/settings?tab=apiKeys) " + "Please provide your API key:" + ).ask() + + if api_key: + set_global_config("api_key", api_key.strip()) + + return api_key + elif api_key is None: # User pressed Ctrl+C + sys.exit(0) + + +def init_telemetry_config() -> None: + if ( + get_global_config("telemetry_tracking_enabled") is None + or get_global_config("telemetry_api_key") is None + ): + set_global_config("telemetry_tracking_enabled", True) + set_global_config( + "telemetry_api_key", + POSTHOG_KEY, + ) + + +def update_variants_from_backend( + app_id: str, + config: MutableMapping[str, Any], + host: str, + api_key: str = None, +) -> MutableMapping[str, Any]: + """Reads the list of variants from the backend and updates the config accordingly + + Arguments: + app_id -- the app id + config -- the config loaded using toml.load + api_key -- the api key to use for authentication + + Returns: + a new config object later to be saved using toml.dump(config, config_file.open('w')) + """ + client = AgentaApi( + base_url=f"{host}/{BACKEND_URL_SUFFIX}", + api_key=api_key, + ) + + try: + variants: List[AppVariant] = client.apps.list_app_variants(app_id=app_id) + except Exception as ex: + raise ex + + config["variants"] = [variant.variant_name for variant in variants] + config["variant_ids"] = [variant.variant_id for variant in variants] + return config + + +def update_config_from_backend(config_file: Path, host: str): + """Updates the config file with new information from the backend + + Arguments: + config_file -- the path to the config file + """ + assert config_file.exists(), "Config file does not exist!" 
+ config = toml.load(config_file) + app_id = config["app_id"] + api_key = config.get("api_key", "") + if "variants" not in config: + config["variants"] = [] + if "variant_ids" not in config: + config["variant_ids"] = [] + config = update_variants_from_backend(app_id, config, host, api_key) + toml.dump(config, config_file.open("w")) + + +def display_app_variant(variant: AppVariant): + """Prints a variant nicely in the terminal""" + click.echo( + click.style("App Name: ", bold=True, fg="green") + + click.style(variant.app_name, fg="green") + ) + click.echo( + click.style("Variant Name: ", bold=True, fg="blue") + + click.style(variant.variant_name, fg="blue") + ) + click.echo(click.style("Parameters: ", bold=True, fg="cyan")) + if variant.parameters: + for param, value in variant.parameters.items(): + click.echo( + click.style(f" {param}: ", fg="cyan") + + click.style(str(value), fg="cyan") + ) + else: + click.echo(click.style(" Defaults from code", fg="cyan")) + if variant.previous_variant_name: + click.echo( + click.style("Template Variant Name: ", bold=True, fg="magenta") + + click.style(variant.previous_variant_name, fg="magenta") + ) + else: + click.echo( + click.style("Template Variant Name: ", bold=True, fg="magenta") + + click.style("None", fg="magenta") + ) + click.echo( + click.style("-" * 50, bold=True, fg="white") + ) # a line for separating each variant diff --git a/services/completion-stateless-sdk/agenta/cli/main.py b/services/completion-stateless-sdk/agenta/cli/main.py new file mode 100644 index 0000000000..1f17272048 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/cli/main.py @@ -0,0 +1,229 @@ +import os +import re +import shutil +import sys +from typing import Union +from pathlib import Path + +import click +import questionary +import toml + +from agenta.cli import helper +from agenta.cli import variant_configs +from agenta.cli import variant_commands +from agenta.cli import evaluation_commands + +from agenta.client.backend.client import 
AgentaApi + +BACKEND_URL_SUFFIX = os.environ.get("BACKEND_URL_SUFFIX", "api") + + +def print_version(ctx, param, value): + if not value or ctx.resilient_parsing: + return + try: + try: + from importlib.metadata import PackageNotFoundError, version + except ImportError: + from importlib_metadata import PackageNotFoundError, version + package_version = version("agenta") + except PackageNotFoundError: + package_version = "package is not installed" + click.echo(f"Agenta CLI version: {package_version}") + ctx.exit() + + +def check_latest_version() -> Union[str, None]: + import requests + + try: + response = requests.get("https://pypi.org/pypi/agenta/json", timeout=360) + response.raise_for_status() + latest_version = response.json()["info"]["version"] + return latest_version + except (requests.RequestException, KeyError): + return None + + +def notify_update(available_version: str): + import importlib.metadata + + installed_version = importlib.metadata.version("agenta") + if available_version > installed_version: + click.echo( + click.style( + f"A new release of agenta is available: {installed_version} → {available_version}", + fg="yellow", + ) + ) + click.echo( + click.style("To upgrade, run: pip install --upgrade agenta", fg="yellow") + ) + + +@click.group() +@click.option( + "--version", + "-v", + is_flag=True, + callback=print_version, + expose_value=False, + is_eager=True, +) +def cli(): + latest_version = check_latest_version() + if latest_version: + notify_update(latest_version) + + +@click.command() +@click.option("--app-name", "--app_name", default=None) +@click.option("--backend-host", "backend_host", default=None) +def init(app_name: str, backend_host: str): + """Initialize a new Agenta app with the template files.""" + + init_option = "Blank App" if backend_host != "" and app_name != "" else "" + + api_key = os.getenv("AGENTA_API_KEY") + + if not app_name: + while True: + app_name = questionary.text("Please enter the app name").ask() + if app_name and 
re.match("^[a-zA-Z0-9_-]+$", app_name): + break + else: + if app_name is None: # User pressed Ctrl+C + sys.exit(0) + else: + print( + "Invalid input. Please use only alphanumeric characters without spaces." + ) + + try: + backend_hosts = { + "https://cloud.agenta.ai": "On agenta cloud", + "http://localhost": "On my local machine", + } + where_question = backend_hosts.get(backend_host, "On a remote machine") + if not backend_host: + where_question = questionary.select( + "Where are you running agenta?", + choices=[ + "On agenta cloud", + "On my local machine", + "On a remote machine", + ], + ).ask() + + if where_question == "On my local machine": + backend_host = "http://localhost" + elif where_question == "On a remote machine": + backend_host = questionary.text( + "Please provide the IP or URL of your remote host" + ).ask() + elif where_question == "On agenta cloud": + global_backend_host = helper.get_global_config("host") + if global_backend_host: + backend_host = global_backend_host + else: + backend_host = "https://cloud.agenta.ai" + + if not api_key: + api_key = helper.get_api_key(backend_host) + + elif where_question is None: # User pressed Ctrl+C + sys.exit(0) + backend_host = ( + backend_host + if backend_host.startswith("http://") or backend_host.startswith("https://") + else "http://" + backend_host + ) + + # initialize the client with the backend url and api key + client = AgentaApi( + base_url=f"{backend_host}/{BACKEND_URL_SUFFIX}", + api_key=api_key if where_question == "On agenta cloud" else "", + ) + + # Get app_id after creating new app in the backend server + try: + app_id = client.apps.create_app(app_name=app_name).app_id + except Exception as ex: + click.echo(click.style(f"Error: {ex}", fg="red")) + sys.exit(1) + + # Set app toml configuration + config = { + "app_name": app_name, + "app_id": app_id, + "backend_host": backend_host, + "api_key": api_key if where_question == "On agenta cloud" else None, + } + with open("config.toml", "w") as 
config_file: + toml.dump(config, config_file) + + # Ask for init option + if not init_option: + init_option = questionary.select( + "How do you want to initialize your app?", + choices=["Blank App", "Start from template"], + ).ask() + + # If the user selected the second option, show a list of available templates + if init_option == "Start from template": + current_dir = Path.cwd() + template_dir = Path(__file__).parent.parent / "templates" + templates = [ + folder.name for folder in template_dir.iterdir() if folder.is_dir() + ] + template_desc = [ + toml.load((template_dir / name / "template.toml"))["short_desc"] + for name in templates + ] + + # Show the templates to the user + template = questionary.select( + "Which template do you want to use?", + choices=[ + questionary.Choice( + title=f"{template} - {template_desc}", value=template + ) + for template, template_desc in zip(templates, template_desc) + ], + ).ask() + + # Copy the template files to the current directory + chosen_template_dir = template_dir / template + for file in chosen_template_dir.glob("*"): + if file.name != "template.toml" and not file.is_dir(): + shutil.copy(file, current_dir / file.name) + elif init_option is None: # User pressed Ctrl+C + sys.exit(0) + + # Create a .gitignore file and add some default environment folder names to it + gitignore_content = ( + "# Environments \nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\nmyenv/\n" + ) + if not os.path.exists(".agentaignore"): + with open(".agentaignore", "w") as gitignore_file: + gitignore_file.write(gitignore_content) + + click.echo("App initialized successfully") + if init_option == "Start from template": + click.echo( + "Please check the README.md for further instructions to setup the template." 
+ ) + except Exception as ex: + click.echo(click.style(f"Error: {ex}", fg="red")) + sys.exit(1) + + +# Add the commands to the CLI group +cli.add_command(init) +cli.add_command(variant_configs.config) +cli.add_command(variant_commands.variant) +cli.add_command(evaluation_commands.evaluation) + +if __name__ == "__main__": + cli() diff --git a/services/completion-stateless-sdk/agenta/cli/telemetry.py b/services/completion-stateless-sdk/agenta/cli/telemetry.py new file mode 100644 index 0000000000..76e2c8b7e0 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/cli/telemetry.py @@ -0,0 +1,50 @@ +# Stdlib Imports +from uuid import uuid4 + +# Own Imports +from agenta.cli import helper + +# Third party Imports +from posthog import Posthog + + +# Load telemetry configuration +helper.init_telemetry_config() + + +class EventTracking(Posthog): + _instance = None + + def __new__(cls, *args, **kwargs): + if cls._instance is None: + cls._instance = super().__new__(cls) + return cls._instance + + def __init__(self, api_key: str, host: str) -> None: + super(Posthog, self).__init__(api_key, host) + + def capture_event( + self, + event_name: str, + body: dict, + ) -> None: + """ + Captures an event. + + Args: + event_name (str): The name of the event being captured. + body (dict): Contains the data associated with the event being captured. 
+ """ + + # A unique identifier for the user or entity associated with the event + distinct_id = helper.get_global_config("telemetry_distinct_id") + if not distinct_id: + distinct_id = uuid4() + helper.set_global_config("telemetry_distinct_id", str(distinct_id)) + self.capture(distinct_id, event_name, body) + + +# Initialize event tracking +event_track = EventTracking( + helper.get_global_config("telemetry_api_key"), "https://app.posthog.com" +) diff --git a/services/completion-stateless-sdk/agenta/cli/variant_commands.py b/services/completion-stateless-sdk/agenta/cli/variant_commands.py new file mode 100644 index 0000000000..ca144843bd --- /dev/null +++ b/services/completion-stateless-sdk/agenta/cli/variant_commands.py @@ -0,0 +1,526 @@ +import os +import re +import sys +from typing import List +from pathlib import Path + +from requests.exceptions import ConnectionError + +import click +import questionary +import toml + +from agenta.cli import helper +from agenta.cli.telemetry import event_track +from agenta.client.backend.client import AgentaApi +from agenta.client.api import add_variant_to_server +from agenta.client.api_models import AppVariant, Image +from agenta.docker.docker_utils import build_tar_docker_container +from agenta.client.backend.types.variant_action import VariantAction + + +BACKEND_URL_SUFFIX = os.environ.get("BACKEND_URL_SUFFIX", "api") + + +@click.group() +def variant(): + """Commands for variants""" + pass + + +def add_variant( + app_folder: str, file_name: str, host: str, overwrite: bool, config_name="default" +) -> str: + """ + Adds a variant to the backend. Sends the code as a tar to the backend, which then containerizes it and adds it to the backend store. + The app variant name to be added is + {file_name.removesuffix(".py")}.{config_name} + Args: + variant_name: the name of the variant + app_folder: the folder of the app + file_name: the name of the file to run. 
+ config_name: the name of the config to use for now it is always default + Returns: + the name of the code base and variant(useful for serve) + """ + + app_path = Path(app_folder) + config_file = app_path / "config.toml" + config = toml.load(config_file) + + app_name = config["app_name"] + app_id = config["app_id"] + api_key = config.get("api_key", "") + + config_name = "default" + base_name = file_name.removesuffix(".py") + variant_name = f"{base_name}.{config_name}" + + # check files in folder + app_file = app_path / file_name + if not app_file.exists(): + click.echo( + click.style( + f"No {file_name} exists! Please make sure you are in the right directory", + fg="red", + ) + ) + return None + + env_file = app_path / ".env" + if not env_file.exists(): + continue_without_env = questionary.confirm( + "No .env file found! Are you sure you handled the API keys needed in your application?\n Do you want to continue without it?" + ).ask() + if not continue_without_env: + click.echo("Operation cancelled.") + sys.exit(0) + + requirements_file = app_path / "requirements.txt" + if not requirements_file.exists(): + continue_without_requirements = questionary.confirm( + "No requirements.txt file found! Are you sure you do not need it in your application?\n Do you want to continue without it?" + ).ask() + if not continue_without_requirements: + click.echo("Operation cancelled.") + sys.exit(0) + + # Validate variant name + if not re.match("^[a-zA-Z0-9_]+$", base_name): + click.echo( + click.style( + "Invalid input. Please use only alphanumeric characters without spaces in the filename.", + fg="red", + ) + ) + sys.exit(0) + + # update the config file with the variant names from the backend + variant_name = f"{base_name}.{config_name}" + + client = AgentaApi( + base_url=f"{host}/{BACKEND_URL_SUFFIX}", + api_key=api_key, + ) + + if variant_name in config["variants"] and not overwrite: + if not overwrite: + overwrite = questionary.confirm( + "This variant already exists. 
Do you want to overwrite it?" + ).ask() + if not overwrite: + click.echo("Operation cancelled.") + return + + try: + click.echo( + click.style( + f"Preparing code base {base_name} into a tar file...", + fg="bright_black", + ) + ) + tar_path = build_tar_docker_container(folder=app_path, file_name=file_name) + + click.echo( + click.style( + f"Building code base {base_name} for {variant_name} into a docker image...", + fg="bright_black", + ) + ) + with tar_path.open("rb") as tar_file: + built_image: Image = client.containers.build_image( + app_id=app_id, + base_name=base_name, + tar_file=tar_file, + ) + image = Image(**built_image.dict()) + if tar_path.exists(): + tar_path.unlink() + + except Exception as ex: + click.echo(click.style(f"Error while building image: {ex}", fg="red")) + raise + try: + if overwrite: + click.echo( + click.style( + f"Updating {base_name} to server...", + fg="bright_black", + ) + ) + variant_id = config["variant_ids"][config["variants"].index(variant_name)] + client.variants.update_variant_image( + variant_id=variant_id, + docker_id=image.docker_id, + tags=image.tags, + type=image.type, + ) # this automatically restarts + else: + click.echo(click.style(f"Adding {variant_name} to server...", fg="yellow")) + response = add_variant_to_server( + app_id, + base_name, + image, + f"{host}/{BACKEND_URL_SUFFIX}", + api_key, + ) + variant_id = response["variant_id"] + config["variants"].append(variant_name) + config["variant_ids"].append(variant_id) + except Exception as ex: + if overwrite: + click.echo(click.style(f"Error while updating variant: {ex}", fg="red")) + else: + click.echo(click.style(f"Error while adding variant: {ex}", fg="red")) + raise + + agenta_dir = Path.home() / ".agenta" + global_toml_file = toml.load(agenta_dir / "config.toml") + tracking_enabled: bool = global_toml_file["telemetry_tracking_enabled"] + if overwrite: + # Track a deployment event + if tracking_enabled: + get_user_id = client.fetch_user_profile() + user_id = 
get_user_id["id"] + event_track.capture_event( + "app_deployment", + body={ + "app_id": app_id, + "deployed_by": user_id, + "environment": "CLI", + "version": "cloud" if api_key else "oss", + }, + ) + + click.echo( + click.style( + f"Variant {variant_name} for App {app_name} updated successfully 🎉", + bold=True, + fg="green", + ) + ) + else: + # Track a deployment event + if tracking_enabled: + get_user_id = client.fetch_user_profile() + user_id = get_user_id["id"] + event_track.capture_event( + "app_deployment", + body={ + "app_id": app_id, + "deployed_by": user_id, + "environment": "CLI", + "version": "cloud" if api_key else "oss", + }, + ) + + click.echo( + click.style( + f"Variant {variant_name} for App {app_name} added successfully to Agenta!", + fg="green", + ) + ) + # Last step us to save the config file + toml.dump(config, config_file.open("w")) + if overwrite: + # In the case we are overwriting, don't return anything. Otherwise the command server would attempt to start the container which would result in an error!!! + # TODO: Improve this stupid design + return None + else: + return variant_id + + +def start_variant(variant_id: str, app_folder: str, host: str): + """ + Starts a container for an existing variant + Args: + variant_name: the name of the variant + app_folder: the folder of the app + """ + app_folder = Path(app_folder) + config_file = app_folder / "config.toml" + config = toml.load(config_file) + app_id = config["app_id"] + api_key = config.get("api_key", "") + + if len(config["variants"]) == 0: + click.echo("No variants found. Please add a variant first.") + return + + if variant_id: + if variant_id not in config["variant_ids"]: + click.echo( + click.style( + f"Variant {variant_id} not found in backend. 
Maybe you removed it in the webUI?", + fg="red", + ) + ) + return + else: + variant_name = questionary.select( + "Please choose a variant", choices=config["variants"] + ).ask() + variant_id = config["variant_ids"][config["variants"].index(variant_name)] + + client = AgentaApi( + base_url=f"{host}/{BACKEND_URL_SUFFIX}", + api_key=api_key, + ) + + variant = client.variants.start_variant( + variant_id=variant_id, + action=VariantAction(action="START"), + ) + endpoint = variant.uri + click.echo("\n" + click.style("Congratulations! 🎉", bold=True, fg="green")) + click.echo( + click.style("Your app has been deployed locally as an API. 🚀", fg="cyan") + + click.style(" You can access it here: ", fg="white") + + click.style(f"{endpoint}/", bold=True, fg="yellow") + ) + + click.echo( + click.style("\nRead the API documentation. 📚", fg="cyan") + + click.style(" It's available at: ", fg="white") + + click.style(f"{endpoint}/docs", bold=True, fg="yellow") + ) + + webui_host = "http://localhost" if host == "localhost" else host + click.echo( + click.style( + "\nStart experimenting with your app in the playground. 🎮", + fg="cyan", + ) + + click.style(" Go to: ", fg="white") + + click.style(f"{webui_host}/apps/{app_id}/playground", bold=True, fg="yellow") + + "\n" + ) + + +def remove_variant(variant_name: str, app_folder: str, host: str): + """ + Removes a variant from the server + Args: + variant_name: the name of the variant + app_folder: the folder of the app + """ + config_file = Path(app_folder) / "config.toml" + config = toml.load(config_file) + app_name = config["app_name"] + api_key = config.get("api_key", "") + + if not config["variants"]: + click.echo( + click.style( + f"No variants found for app {app_name}. Make sure you have deployed at least one variant.", + fg="red", + ) + ) + return + + if variant_name: + if variant_name not in config["variants"]: + click.echo( + click.style( + f"Variant {variant_name} not found in backend. 
Maybe you already removed it in the webUI?", + fg="red", + ) + ) + return + else: + variant_name = questionary.select( + "Please choose a variant", choices=config["variants"] + ).ask() + variant_id = config["variant_ids"][config["variants"].index(variant_name)] + + client = AgentaApi( + base_url=f"{host}/{BACKEND_URL_SUFFIX}", + api_key=api_key, + ) + + try: + client.variants.remove_variant(variant_id=variant_id) + except Exception as ex: + click.echo( + click.style( + f"Error while removing variant {variant_name} for App {app_name} from the backend", + fg="red", + ) + ) + click.echo(click.style(f"Error message: {ex}", fg="red")) + return + + click.echo( + click.style( + f"Variant {variant_name} for App {app_name} removed successfully from Agenta!", + fg="green", + ) + ) + + +def list_variants(app_folder: str, host: str): + """List available variants for an app and print them to the console + + Arguments: + app_folder -- _description_ + """ + config_file = Path(app_folder) / "config.toml" + config = toml.load(config_file) + app_name = config["app_name"] + app_id = config["app_id"] + api_key = config.get("api_key", "") + variants = [] + + client = AgentaApi( + base_url=f"{host}/{BACKEND_URL_SUFFIX}", + api_key=api_key, + ) + + try: + variants: List[AppVariant] = client.apps.list_app_variants(app_id=app_id) + except Exception as ex: + raise ex + + if variants: + for variant in variants: + helper.display_app_variant(variant) + else: + click.echo(click.style(f"No variants found for app {app_name}", fg="red")) + + +def config_check(app_folder: str): + """Check the config file and update it from the backend + + Arguments: + app_folder -- the app folder + """ + + click.echo(click.style("\nChecking and updating config file...", fg="bright_black")) + app_folder = Path(app_folder) + config_file = app_folder / "config.toml" + if not config_file.exists(): + click.echo( + click.style( + f"Config file not found in {app_folder}. 
Make sure you are in the right folder and that you have run agenta init first.", + fg="red", + ) + ) + return + host = get_host(app_folder) # TODO: Refactor the whole config thing + helper.update_config_from_backend(config_file, host=host) + + +def get_host(app_folder: str) -> str: + """Fetches the host from the config""" + app_folder = Path(app_folder) + config_file = app_folder / "config.toml" + config = toml.load(config_file) + if "backend_host" not in config: + host = "http://localhost" + else: + host = config["backend_host"] + return host + + +@variant.command(name="remove") +@click.option("--app_folder", default=".") +@click.option("--variant_name", default="") +def remove_variant_cli(variant_name: str, app_folder: str): + """Remove an existing variant.""" + + try: + config_check(app_folder) + remove_variant( + variant_name=variant_name, + app_folder=app_folder, + host=get_host(app_folder), + ) + except Exception as ex: + click.echo(click.style(f"Error while removing variant: {ex}", fg="red")) + + +@variant.command( + name="serve", + context_settings=dict( + ignore_unknown_options=True, + allow_extra_args=True, + ), +) +@click.option("--app_folder", default=".") +@click.option("--file_name", default=None, help="The name of the file to run") +@click.option( + "--overwrite", + is_flag=True, + help="Overwrite the existing variant if it exists", +) +@click.pass_context +def serve_cli(ctx, app_folder: str, file_name: str, overwrite: bool): + """Adds a variant to the web UI and serves the API locally.""" + + if not file_name: + if ctx.args: + file_name = ctx.args[0] + else: + error_msg = "To serve variant, kindly provide the filename and run:\n" + error_msg += ">>> agenta variant serve --file_name .py\n" + error_msg += "or\n" + error_msg += ">>> agenta variant serve .py" + click.echo(click.style(f"{error_msg}", fg="red")) + sys.exit(1) + + try: + config_check(app_folder) + except Exception as e: + click.echo(click.style("Failed during configuration check.", 
fg="red")) + click.echo(click.style(f"Error message: {str(e)}", fg="red")) + sys.exit(1) + + try: + host = get_host(app_folder) + except Exception as e: + click.echo(click.style("Failed to retrieve the host.", fg="red")) + click.echo(click.style(f"Error message: {str(e)}", fg="red")) + sys.exit(1) + + try: + api_key = helper.get_global_config("api_key") + except Exception as e: + click.echo(click.style("Failed to retrieve the api key.", fg="red")) + click.echo(click.style(f"Error message: {str(e)}", fg="red")) + sys.exit(1) + + try: + variant_id = add_variant( + app_folder=app_folder, file_name=file_name, host=host, overwrite=overwrite + ) + except Exception as e: + click.echo(click.style("Failed to add variant.", fg="red")) + click.echo(click.style(f"Error message: {str(e)}", fg="red")) + sys.exit(1) + + if variant_id: + try: + start_variant(variant_id=variant_id, app_folder=app_folder, host=host) + except ConnectionError: + error_msg = "Failed to connect to Agenta backend. Here's how you can solve the issue:\n" + error_msg += "- First, please ensure that the backend service is running and accessible.\n" + error_msg += ( + "- Second, try restarting the containers (if using Docker Compose)." 
+ ) + click.echo(click.style(f"{error_msg}", fg="red")) + sys.exit(1) + except Exception as e: + click.echo(click.style("Failed to start container with LLM app.", fg="red")) + click.echo(click.style(f"Error message: {str(e)}", fg="red")) + sys.exit(1) + + +@variant.command(name="list") +@click.option("--app_folder", default=".") +def list_variants_cli(app_folder: str): + """List the variants in the backend""" + try: + config_check(app_folder) + list_variants(app_folder=app_folder, host=get_host(app_folder)) + except Exception as ex: + click.echo(click.style(f"Error while listing variants: {ex}", fg="red")) diff --git a/services/completion-stateless-sdk/agenta/cli/variant_configs.py b/services/completion-stateless-sdk/agenta/cli/variant_configs.py new file mode 100644 index 0000000000..3f43bbc104 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/cli/variant_configs.py @@ -0,0 +1,49 @@ +import click +from agenta.cli import helper + + +@click.group() +def config(): + """Commands for variants configurations""" + pass + + +def update_backend_host(backend_host: str): + """Check the config file and update the backend URL + + Arguments: + app_folder -- the app folder + backend_host -- the backend host + """ + + click.echo( + click.style("\nChecking and updating global backend host...", fg="bright_black") + ) + helper.set_global_config("host", backend_host) + + +@config.command( + name="set-host", + context_settings=dict( + ignore_unknown_options=True, + allow_extra_args=True, + ), +) +@click.option( + "--backend_host", default=None, help="The URL of the backend host to use." +) +@click.pass_context +def set_config_url(ctx, backend_host: str): + """Set the backend URL in the app configuration""" + + try: + if not backend_host: + if ctx.args: + backend_host = ctx.args[0] + else: + click.echo(click.style("Backend host URL not specified", fg="red")) + + update_backend_host(backend_host) + click.echo(click.style("Backend host updated successfully! 
🎉\n")) + except Exception as ex: + click.echo(click.style(f"Error updating backend host: {ex}", fg="red")) diff --git a/services/completion-stateless-sdk/agenta/client/Readme.md b/services/completion-stateless-sdk/agenta/client/Readme.md new file mode 100644 index 0000000000..39c8adc31f --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/Readme.md @@ -0,0 +1,104 @@ +Client code to communicate with the backend. + +Currently the models are manually copied from the backend code. This needs to change. + +# Generate Backend + +To generate the client code using Fern, follow the steps below. + +1. Open a Terminal and navigate to the folder where this Readme.md file is. For example; +```bash +cd agenta/agenta-cli/agenta/client +``` + +2. Next ensure you have installed Fern by executing the command; +```bash +npm install -g fern-api +``` + +3. Execute this command to initialize Fern to import and use the OpenAPI spec; + +> To use an OpenAPI spec, you can pass in the filepath or URL. +> We'll need to log in to use fern. +> We'll be using a url to the openapi.json for [Agenta Cloud](https://cloud.agenta.ai). +> Alternatively, for `cloud-dev` we could use [Cloud Local](http://localhost). + +```bash +fern init --openapi https://cloud.agenta.ai/api/openapi.json +# fern init --openapi http://localhost/api/openapi.json +``` + +4. Add the Fern Python SDK; +```bash +fern add fern-python-sdk +``` + +5. Go to the generators.yml, which would look like this; +```yaml +default-group: local +groups: + local: + generators: + - name: fernapi/fern-typescript-node-sdk + version: 0.9.5 + output: + location: local-file-system + path: ../sdks/typescript + - name: fernapi/fern-python-sdk + version: 0.6.0 +``` + +6. Remove `fernapi/fern-typescript-node-sdk`; +```yaml +default-group: local +groups: + local: + generators: + - name: fernapi/fern-python-sdk + version: 3.10.6 +``` + +7. 
Update `fernapi/fern-python-sdk`, which would look like this; +```yaml +default-group: local +groups: + local: + generators: + - name: fernapi/fern-python-sdk + version: 3.10.6 + output: + location: local-file-system + path: ../backend +``` +image + + +8. Go to the fern.config.json file and change the value of "organization" to `agenta` +image + + +9. Generate the client code +```bash + fern generate +``` + +10. Go to `./backend/containers/client.py`, search for the `build_image` function in the AgentaApi class and update `timeout_in_seconds` to `600` in `request_options'. It should now look like this; +```python +_response = self._client_wrapper.httpx_client.request( + "containers/build_image", + method="POST", + params={ + "app_id": app_id, + "base_name": base_name, + }, + data={}, + files={ + "tar_file": tar_file, + }, + request_options={**request_options, "timeout_in_seconds": 600}, + omit=OMIT, +) +``` +image + +11. Delete the `./fern` folder. diff --git a/services/completion-stateless-sdk/agenta/client/__init__.py b/services/completion-stateless-sdk/agenta/client/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/services/completion-stateless-sdk/agenta/client/api.py b/services/completion-stateless-sdk/agenta/client/api.py new file mode 100644 index 0000000000..1bb5880bed --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/api.py @@ -0,0 +1,74 @@ +import os +import toml +import time +import click +from typing import Dict +from pathlib import Path +from agenta.client.backend import client +from agenta.client.api_models import Image +from requests.exceptions import RequestException +from agenta.client.backend.client import AgentaApi +from agenta.client.exceptions import APIRequestError + + +def add_variant_to_server( + app_id: str, + base_name: str, + image: Image, + backend_url: str, + api_key: str, + retries=10, + backoff_factor=1, +) -> Dict: + """ + Adds a variant to the server with a retry mechanism and a single-line 
loading state. + + Args: + app_id (str): The ID of the app to add the variant to. + base_name (str): The base name for the variant. + image (Image): The image to use for the variant. + retries (int): Number of times to retry the request. + backoff_factor (float): Factor to determine the delay between retries (exponential backoff). + + Returns: + dict: The JSON response from the server. + + Raises: + APIRequestError: If the request to the server fails after retrying. + """ + + click.echo( + click.style("Waiting for the variant to be ready", fg="yellow"), nl=False + ) + + client = AgentaApi( + base_url=backend_url, + api_key=api_key, + ) + for attempt in range(retries): + try: + response = client.apps.add_variant_from_image( + app_id=app_id, + variant_name=f"{base_name.lower()}.default", + base_name=base_name, + config_name="default", + docker_id=image.docker_id, + tags=image.tags, + ) + click.echo(click.style("\nVariant added successfully!", fg="green")) + return response + except RequestException as e: + if attempt < retries - 1: + click.echo(click.style(".", fg="yellow"), nl=False) + time.sleep(backoff_factor * (2**attempt)) + else: + raise APIRequestError( + click.style( + f"\nRequest to app_variant endpoint failed with status code {response.status_code} and error message: {e}.", + fg="red", + ) + ) + except Exception as e: + raise APIRequestError( + click.style(f"\nAn unexpected error occurred: {e}", fg="red") + ) diff --git a/services/completion-stateless-sdk/agenta/client/api_models.py b/services/completion-stateless-sdk/agenta/client/api_models.py new file mode 100644 index 0000000000..6bd5233566 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/api_models.py @@ -0,0 +1,34 @@ +from pydantic import BaseModel +from typing import List, Optional, Dict, Any + + +class AppVariant(BaseModel): + app_id: str + app_name: str + variant_name: str + variant_id: str + parameters: Optional[Dict[str, Any]] + previous_variant_name: Optional[str] + base_name: 
Optional[str] + config_name: Optional[str] + + +class Variant(BaseModel): + variant_id: str + + +class Image(BaseModel): + type: Optional[str] + docker_id: str + tags: str + + +class URI(BaseModel): + uri: str + + +class VariantConfigPayload(BaseModel): + base_id: str + config_name: str + parameters: Dict[str, Any] + overwrite: bool diff --git a/services/completion-stateless-sdk/agenta/client/backend/__init__.py b/services/completion-stateless-sdk/agenta/client/backend/__init__.py new file mode 100644 index 0000000000..8907b11d29 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/__init__.py @@ -0,0 +1,257 @@ +# This file was auto-generated by Fern from our API Definition. + +from .types import ( + AgentaNodeDto, + AgentaNodeDtoNodesValue, + AgentaNodesResponse, + AgentaRootDto, + AgentaRootsResponse, + AgentaTreeDto, + AgentaTreesResponse, + AggregatedResult, + AggregatedResultEvaluatorConfig, + App, + AppVariantResponse, + AppVariantRevision, + BaseOutput, + BodyImportTestset, + CollectStatusResponse, + ConfigDb, + ConfigDto, + ConfigResponseModel, + CorrectAnswer, + CreateAppOutput, + CreateSpan, + CreateTraceResponse, + DockerEnvVars, + EnvironmentOutput, + EnvironmentOutputExtended, + EnvironmentRevision, + Error, + Evaluation, + EvaluationScenario, + EvaluationScenarioInput, + EvaluationScenarioOutput, + EvaluationScenarioResult, + EvaluationScenarioScoreUpdate, + EvaluationStatusEnum, + EvaluationType, + Evaluator, + EvaluatorConfig, + EvaluatorMappingOutputInterface, + EvaluatorOutputInterface, + ExceptionDto, + GetConfigResponse, + HttpValidationError, + HumanEvaluation, + HumanEvaluationScenario, + HumanEvaluationScenarioInput, + HumanEvaluationScenarioOutput, + HumanEvaluationScenarioUpdate, + HumanEvaluationUpdate, + Image, + InviteRequest, + LifecycleDto, + LinkDto, + ListApiKeysResponse, + LlmRunRateLimit, + LlmTokens, + LmProvidersEnum, + NewHumanEvaluation, + NewTestset, + NodeDto, + NodeType, + OTelContextDto, + 
OTelEventDto, + OTelExtraDto, + OTelLinkDto, + OTelSpanDto, + OTelSpanKind, + OTelSpansResponse, + OTelStatusCode, + Organization, + OrganizationOutput, + Outputs, + ParentDto, + Permission, + ReferenceDto, + ReferenceRequestModel, + Result, + RootDto, + Score, + SimpleEvaluationOutput, + Span, + SpanDetail, + SpanDto, + SpanDtoNodesValue, + SpanStatusCode, + SpanVariant, + StatusCode, + StatusDto, + Template, + TemplateImageInfo, + TestSetOutputResponse, + TestSetSimpleResponse, + TimeDto, + TraceDetail, + TreeDto, + TreeType, + UpdateAppOutput, + Uri, + ValidationError, + ValidationErrorLocItem, + VariantAction, + VariantActionEnum, + WithPagination, + WorkspaceMemberResponse, + WorkspacePermission, + WorkspaceResponse, + WorkspaceRole, + WorkspaceRoleResponse, +) +from .errors import UnprocessableEntityError +from . import ( + apps, + bases, + configs, + containers, + environments, + evaluations, + evaluators, + observability, + observability_v_1, + testsets, + variants, +) +from .client import AgentaApi, AsyncAgentaApi +from .containers import ContainerTemplatesResponse +from .observability_v_1 import Format, QueryTracesResponse +from .variants import AddVariantFromBaseAndConfigResponse + +__all__ = [ + "AddVariantFromBaseAndConfigResponse", + "AgentaApi", + "AgentaNodeDto", + "AgentaNodeDtoNodesValue", + "AgentaNodesResponse", + "AgentaRootDto", + "AgentaRootsResponse", + "AgentaTreeDto", + "AgentaTreesResponse", + "AggregatedResult", + "AggregatedResultEvaluatorConfig", + "App", + "AppVariantResponse", + "AppVariantRevision", + "AsyncAgentaApi", + "BaseOutput", + "BodyImportTestset", + "CollectStatusResponse", + "ConfigDb", + "ConfigDto", + "ConfigResponseModel", + "ContainerTemplatesResponse", + "CorrectAnswer", + "CreateAppOutput", + "CreateSpan", + "CreateTraceResponse", + "DockerEnvVars", + "EnvironmentOutput", + "EnvironmentOutputExtended", + "EnvironmentRevision", + "Error", + "Evaluation", + "EvaluationScenario", + "EvaluationScenarioInput", + 
"EvaluationScenarioOutput", + "EvaluationScenarioResult", + "EvaluationScenarioScoreUpdate", + "EvaluationStatusEnum", + "EvaluationType", + "Evaluator", + "EvaluatorConfig", + "EvaluatorMappingOutputInterface", + "EvaluatorOutputInterface", + "ExceptionDto", + "Format", + "GetConfigResponse", + "HttpValidationError", + "HumanEvaluation", + "HumanEvaluationScenario", + "HumanEvaluationScenarioInput", + "HumanEvaluationScenarioOutput", + "HumanEvaluationScenarioUpdate", + "HumanEvaluationUpdate", + "Image", + "InviteRequest", + "LifecycleDto", + "LinkDto", + "ListApiKeysResponse", + "LlmRunRateLimit", + "LlmTokens", + "LmProvidersEnum", + "NewHumanEvaluation", + "NewTestset", + "NodeDto", + "NodeType", + "OTelContextDto", + "OTelEventDto", + "OTelExtraDto", + "OTelLinkDto", + "OTelSpanDto", + "OTelSpanKind", + "OTelSpansResponse", + "OTelStatusCode", + "Organization", + "OrganizationOutput", + "Outputs", + "ParentDto", + "Permission", + "QueryTracesResponse", + "ReferenceDto", + "ReferenceRequestModel", + "Result", + "RootDto", + "Score", + "SimpleEvaluationOutput", + "Span", + "SpanDetail", + "SpanDto", + "SpanDtoNodesValue", + "SpanStatusCode", + "SpanVariant", + "StatusCode", + "StatusDto", + "Template", + "TemplateImageInfo", + "TestSetOutputResponse", + "TestSetSimpleResponse", + "TimeDto", + "TraceDetail", + "TreeDto", + "TreeType", + "UnprocessableEntityError", + "UpdateAppOutput", + "Uri", + "ValidationError", + "ValidationErrorLocItem", + "VariantAction", + "VariantActionEnum", + "WithPagination", + "WorkspaceMemberResponse", + "WorkspacePermission", + "WorkspaceResponse", + "WorkspaceRole", + "WorkspaceRoleResponse", + "apps", + "bases", + "configs", + "containers", + "environments", + "evaluations", + "evaluators", + "observability", + "observability_v_1", + "testsets", + "variants", +] diff --git a/services/completion-stateless-sdk/agenta/client/backend/apps/__init__.py b/services/completion-stateless-sdk/agenta/client/backend/apps/__init__.py new file 
mode 100644 index 0000000000..67a41b2742 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/apps/__init__.py @@ -0,0 +1 @@ +# This file was auto-generated by Fern from our API Definition. diff --git a/services/completion-stateless-sdk/agenta/client/backend/apps/client.py b/services/completion-stateless-sdk/agenta/client/backend/apps/client.py new file mode 100644 index 0000000000..b709a8538e --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/apps/client.py @@ -0,0 +1,1631 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ..core.client_wrapper import SyncClientWrapper +from ..core.request_options import RequestOptions +from ..types.app_variant_response import AppVariantResponse +from ..core.jsonable_encoder import jsonable_encoder +from ..core.pydantic_utilities import parse_obj_as +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..types.app import App +from ..types.create_app_output import CreateAppOutput +from ..types.update_app_output import UpdateAppOutput +from ..types.environment_output import EnvironmentOutput +from ..types.environment_output_extended import EnvironmentOutputExtended +from ..core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class AppsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def list_app_variants( + self, app_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[AppVariantResponse]: + """ + Retrieve a list of app variants for a given app ID. + + Args: + app_id (str): The ID of the app to retrieve variants for. 
+ stoken_session (SessionContainer, optional): The session container to verify the user's session. Defaults to Depends(verify_session()). + + Returns: + List[AppVariantResponse]: A list of app variants for the given app ID. + + Parameters + ---------- + app_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[AppVariantResponse] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.apps.list_app_variants( + app_id="app_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"apps/{jsonable_encoder(app_id)}/variants", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[AppVariantResponse], + parse_obj_as( + type_=typing.List[AppVariantResponse], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_variant_by_env( + self, + *, + app_id: str, + environment: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> AppVariantResponse: + """ + Retrieve the app variant based on the provided app_id and environment. + + Args: + app_id (str): The ID of the app to retrieve the variant for. + environment (str): The environment of the app variant to retrieve. + stoken_session (SessionContainer, optional): The session token container. Defaults to Depends(verify_session()). 
+ + Raises: + HTTPException: If the app variant is not found (status_code=500), or if a ValueError is raised (status_code=400), or if any other exception is raised (status_code=500). + + Returns: + AppVariantResponse: The retrieved app variant. + + Parameters + ---------- + app_id : str + + environment : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AppVariantResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.apps.get_variant_by_env( + app_id="app_id", + environment="environment", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "apps/get_variant_by_env", + method="GET", + params={ + "app_id": app_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AppVariantResponse, + parse_obj_as( + type_=AppVariantResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def list_apps( + self, + *, + app_name: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[App]: + """ + Retrieve a list of apps filtered by app_name. + + Args: + app_name (Optional[str]): The name of the app to filter by. + stoken_session (SessionContainer): The session container. + + Returns: + List[App]: A list of apps filtered by app_name. 
+ + Raises: + HTTPException: If there was an error retrieving the list of apps. + + Parameters + ---------- + app_name : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[App] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.apps.list_apps() + """ + _response = self._client_wrapper.httpx_client.request( + "apps", + method="GET", + params={"app_name": app_name}, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[App], + parse_obj_as( + type_=typing.List[App], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create_app( + self, + *, + app_name: str, + project_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> CreateAppOutput: + """ + Create a new app for a user. + + Args: + payload (CreateApp): The payload containing the app name. + stoken_session (SessionContainer): The session container containing the user's session token. + + Returns: + CreateAppOutput: The output containing the newly created app's ID and name. + + Raises: + HTTPException: If there is an error creating the app or the user does not have permission to access the app. + + Parameters + ---------- + app_name : str + + project_id : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CreateAppOutput + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.apps.create_app( + app_name="app_name", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "apps", + method="POST", + json={ + "app_name": app_name, + "project_id": project_id, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CreateAppOutput, + parse_obj_as( + type_=CreateAppOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def remove_app( + self, app_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: + """ + Remove app, all its variant, containers and images + + Arguments: + app -- App to remove + + Parameters + ---------- + app_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.apps.remove_app( + app_id="app_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"apps/{jsonable_encoder(app_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_app( + self, + app_id: str, + *, + app_name: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> UpdateAppOutput: + """ + Update an app for a user. + + Args: + app_id (str): The ID of the app. + payload (UpdateApp): The payload containing the app name. + stoken_session (SessionContainer): The session container containing the user's session token. + + Returns: + UpdateAppOuput: The output containing the newly created app's ID and name. + + Raises: + HTTPException: If there is an error creating the app or the user does not have permission to access the app. + + Parameters + ---------- + app_id : str + + app_name : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + UpdateAppOutput + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.apps.update_app( + app_id="app_id", + app_name="app_name", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"apps/{jsonable_encoder(app_id)}", + method="PATCH", + json={ + "app_name": app_name, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + UpdateAppOutput, + parse_obj_as( + type_=UpdateAppOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def add_variant_from_image( + self, + app_id: str, + *, + variant_name: str, + docker_id: str, + tags: str, + base_name: typing.Optional[str] = OMIT, + config_name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Add a new variant to an app based on a Docker image. + + Args: + app_id (str): The ID of the app to add the variant to. + payload (AddVariantFromImagePayload): The payload containing information about the variant to add. + stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()). + + Raises: + HTTPException: If the feature flag is set to "demo" or if the image does not have a tag starting with the registry name (agenta-server) or if the image is not found or if the user does not have access to the app. 
+ + Returns: + dict: The newly added variant. + + Parameters + ---------- + app_id : str + + variant_name : str + + docker_id : str + + tags : str + + base_name : typing.Optional[str] + + config_name : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.apps.add_variant_from_image( + app_id="app_id", + variant_name="variant_name", + docker_id="docker_id", + tags="tags", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"apps/{jsonable_encoder(app_id)}/variant/from-image", + method="POST", + json={ + "variant_name": variant_name, + "docker_id": docker_id, + "tags": tags, + "base_name": base_name, + "config_name": config_name, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create_app_and_variant_from_template( + self, + *, + app_name: str, + template_id: str, + env_vars: typing.Dict[str, str], + project_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AppVariantResponse: + """ + Create an app and variant from a template. 
+ + Args: + payload (CreateAppVariant): The payload containing the app and variant information. + stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()). + + Raises: + HTTPException: If the user has reached the app limit or if an app with the same name already exists. + + Returns: + AppVariantResponse: The output of the created app variant. + + Parameters + ---------- + app_name : str + + template_id : str + + env_vars : typing.Dict[str, str] + + project_id : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AppVariantResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.apps.create_app_and_variant_from_template( + app_name="app_name", + template_id="template_id", + env_vars={"key": "value"}, + ) + """ + _response = self._client_wrapper.httpx_client.request( + "apps/app_and_variant_from_template", + method="POST", + json={ + "app_name": app_name, + "template_id": template_id, + "project_id": project_id, + "env_vars": env_vars, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AppVariantResponse, + parse_obj_as( + type_=AppVariantResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def list_environments( + self, app_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> 
typing.List[EnvironmentOutput]: + """ + Retrieve a list of environments for a given app ID. + + Args: + app_id (str): The ID of the app to retrieve environments for. + stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()). + + Returns: + List[EnvironmentOutput]: A list of environment objects. + + Parameters + ---------- + app_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[EnvironmentOutput] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.apps.list_environments( + app_id="app_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"apps/{jsonable_encoder(app_id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[EnvironmentOutput], + parse_obj_as( + type_=typing.List[EnvironmentOutput], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def environment_revisions( + self, + app_id: str, + environment_name: typing.Optional[typing.Any], + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> EnvironmentOutputExtended: + """ + Parameters + ---------- + app_id : str + + environment_name : typing.Optional[typing.Any] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + EnvironmentOutputExtended + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.apps.environment_revisions( + app_id="string", + environment_name={"key": "value"}, + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"apps/{jsonable_encoder(app_id)}/revisions/{jsonable_encoder(environment_name)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EnvironmentOutputExtended, + parse_obj_as( + type_=EnvironmentOutputExtended, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncAppsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def list_app_variants( + self, app_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[AppVariantResponse]: + """ + Retrieve a list of app variants for a given app ID. + + Args: + app_id (str): The ID of the app to retrieve variants for. + stoken_session (SessionContainer, optional): The session container to verify the user's session. Defaults to Depends(verify_session()). + + Returns: + List[AppVariantResponse]: A list of app variants for the given app ID. + + Parameters + ---------- + app_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[AppVariantResponse] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.apps.list_app_variants( + app_id="app_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"apps/{jsonable_encoder(app_id)}/variants", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[AppVariantResponse], + parse_obj_as( + type_=typing.List[AppVariantResponse], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_variant_by_env( + self, + *, + app_id: str, + environment: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> AppVariantResponse: + """ + Retrieve the app variant based on the provided app_id and environment. + + Args: + app_id (str): The ID of the app to retrieve the variant for. + environment (str): The environment of the app variant to retrieve. + stoken_session (SessionContainer, optional): The session token container. Defaults to Depends(verify_session()). + + Raises: + HTTPException: If the app variant is not found (status_code=500), or if a ValueError is raised (status_code=400), or if any other exception is raised (status_code=500). + + Returns: + AppVariantResponse: The retrieved app variant. 
+ + Parameters + ---------- + app_id : str + + environment : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AppVariantResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.apps.get_variant_by_env( + app_id="app_id", + environment="environment", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "apps/get_variant_by_env", + method="GET", + params={ + "app_id": app_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AppVariantResponse, + parse_obj_as( + type_=AppVariantResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def list_apps( + self, + *, + app_name: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[App]: + """ + Retrieve a list of apps filtered by app_name. + + Args: + app_name (Optional[str]): The name of the app to filter by. + stoken_session (SessionContainer): The session container. + + Returns: + List[App]: A list of apps filtered by app_name. + + Raises: + HTTPException: If there was an error retrieving the list of apps. 
+ + Parameters + ---------- + app_name : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[App] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.apps.list_apps() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "apps", + method="GET", + params={"app_name": app_name}, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[App], + parse_obj_as( + type_=typing.List[App], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def create_app( + self, + *, + app_name: str, + project_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> CreateAppOutput: + """ + Create a new app for a user. + + Args: + payload (CreateApp): The payload containing the app name. + stoken_session (SessionContainer): The session container containing the user's session token. + + Returns: + CreateAppOutput: The output containing the newly created app's ID and name. + + Raises: + HTTPException: If there is an error creating the app or the user does not have permission to access the app. 
+ + Parameters + ---------- + app_name : str + + project_id : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + CreateAppOutput + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.apps.create_app( + app_name="app_name", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "apps", + method="POST", + json={ + "app_name": app_name, + "project_id": project_id, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CreateAppOutput, + parse_obj_as( + type_=CreateAppOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def remove_app( + self, app_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: + """ + Remove app, all its variant, containers and images + + Arguments: + app -- App to remove + + Parameters + ---------- + app_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.apps.remove_app( + app_id="app_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"apps/{jsonable_encoder(app_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_app( + self, + app_id: str, + *, + app_name: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> UpdateAppOutput: + """ + Update an app for a user. + + Args: + app_id (str): The ID of the app. + payload (UpdateApp): The payload containing the app name. + stoken_session (SessionContainer): The session container containing the user's session token. + + Returns: + UpdateAppOuput: The output containing the newly created app's ID and name. + + Raises: + HTTPException: If there is an error creating the app or the user does not have permission to access the app. + + Parameters + ---------- + app_id : str + + app_name : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + UpdateAppOutput + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.apps.update_app( + app_id="app_id", + app_name="app_name", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"apps/{jsonable_encoder(app_id)}", + method="PATCH", + json={ + "app_name": app_name, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + UpdateAppOutput, + parse_obj_as( + type_=UpdateAppOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def add_variant_from_image( + self, + app_id: str, + *, + variant_name: str, + docker_id: str, + tags: str, + base_name: typing.Optional[str] = OMIT, + config_name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Add a new variant to an app based on a Docker image. + + Args: + app_id (str): The ID of the app to add the variant to. + payload (AddVariantFromImagePayload): The payload containing information about the variant to add. + stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()). 
+ + Raises: + HTTPException: If the feature flag is set to "demo" or if the image does not have a tag starting with the registry name (agenta-server) or if the image is not found or if the user does not have access to the app. + + Returns: + dict: The newly added variant. + + Parameters + ---------- + app_id : str + + variant_name : str + + docker_id : str + + tags : str + + base_name : typing.Optional[str] + + config_name : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.apps.add_variant_from_image( + app_id="app_id", + variant_name="variant_name", + docker_id="docker_id", + tags="tags", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"apps/{jsonable_encoder(app_id)}/variant/from-image", + method="POST", + json={ + "variant_name": variant_name, + "docker_id": docker_id, + "tags": tags, + "base_name": base_name, + "config_name": config_name, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def 
create_app_and_variant_from_template( + self, + *, + app_name: str, + template_id: str, + env_vars: typing.Dict[str, str], + project_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AppVariantResponse: + """ + Create an app and variant from a template. + + Args: + payload (CreateAppVariant): The payload containing the app and variant information. + stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()). + + Raises: + HTTPException: If the user has reached the app limit or if an app with the same name already exists. + + Returns: + AppVariantResponse: The output of the created app variant. + + Parameters + ---------- + app_name : str + + template_id : str + + env_vars : typing.Dict[str, str] + + project_id : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AppVariantResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.apps.create_app_and_variant_from_template( + app_name="app_name", + template_id="template_id", + env_vars={"key": "value"}, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "apps/app_and_variant_from_template", + method="POST", + json={ + "app_name": app_name, + "template_id": template_id, + "project_id": project_id, + "env_vars": env_vars, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AppVariantResponse, + parse_obj_as( + type_=AppVariantResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + 
type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def list_environments( + self, app_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[EnvironmentOutput]: + """ + Retrieve a list of environments for a given app ID. + + Args: + app_id (str): The ID of the app to retrieve environments for. + stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()). + + Returns: + List[EnvironmentOutput]: A list of environment objects. + + Parameters + ---------- + app_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[EnvironmentOutput] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.apps.list_environments( + app_id="app_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"apps/{jsonable_encoder(app_id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[EnvironmentOutput], + parse_obj_as( + type_=typing.List[EnvironmentOutput], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) 
+ raise ApiError(status_code=_response.status_code, body=_response_json) + + async def environment_revisions( + self, + app_id: str, + environment_name: typing.Optional[typing.Any], + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> EnvironmentOutputExtended: + """ + Parameters + ---------- + app_id : str + + environment_name : typing.Optional[typing.Any] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EnvironmentOutputExtended + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.apps.environment_revisions( + app_id="string", + environment_name={"key": "value"}, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"apps/{jsonable_encoder(app_id)}/revisions/{jsonable_encoder(environment_name)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EnvironmentOutputExtended, + parse_obj_as( + type_=EnvironmentOutputExtended, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/services/completion-stateless-sdk/agenta/client/backend/bases/__init__.py b/services/completion-stateless-sdk/agenta/client/backend/bases/__init__.py new file mode 100644 index 0000000000..67a41b2742 --- /dev/null +++ 
b/services/completion-stateless-sdk/agenta/client/backend/bases/__init__.py @@ -0,0 +1 @@ +# This file was auto-generated by Fern from our API Definition. diff --git a/services/completion-stateless-sdk/agenta/client/backend/bases/client.py b/services/completion-stateless-sdk/agenta/client/backend/bases/client.py new file mode 100644 index 0000000000..3e16b47b10 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/bases/client.py @@ -0,0 +1,190 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.client_wrapper import SyncClientWrapper +import typing +from ..core.request_options import RequestOptions +from ..types.base_output import BaseOutput +from ..core.pydantic_utilities import parse_obj_as +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..core.client_wrapper import AsyncClientWrapper + + +class BasesClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def list_bases( + self, + *, + app_id: str, + base_name: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[BaseOutput]: + """ + Retrieve a list of bases filtered by app_id and base_name. + + Args: + request (Request): The incoming request. + app_id (str): The ID of the app to filter by. + base_name (Optional[str], optional): The name of the base to filter by. Defaults to None. + + Returns: + List[BaseOutput]: A list of BaseOutput objects representing the filtered bases. + + Raises: + HTTPException: If there was an error retrieving the bases. + + Parameters + ---------- + app_id : str + + base_name : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[BaseOutput] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.bases.list_bases( + app_id="app_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "bases", + method="GET", + params={ + "app_id": app_id, + "base_name": base_name, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[BaseOutput], + parse_obj_as( + type_=typing.List[BaseOutput], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncBasesClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def list_bases( + self, + *, + app_id: str, + base_name: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[BaseOutput]: + """ + Retrieve a list of bases filtered by app_id and base_name. + + Args: + request (Request): The incoming request. + app_id (str): The ID of the app to filter by. + base_name (Optional[str], optional): The name of the base to filter by. Defaults to None. + + Returns: + List[BaseOutput]: A list of BaseOutput objects representing the filtered bases. + + Raises: + HTTPException: If there was an error retrieving the bases. + + Parameters + ---------- + app_id : str + + base_name : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[BaseOutput] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.bases.list_bases( + app_id="app_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "bases", + method="GET", + params={ + "app_id": app_id, + "base_name": base_name, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[BaseOutput], + parse_obj_as( + type_=typing.List[BaseOutput], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/services/completion-stateless-sdk/agenta/client/backend/client.py b/services/completion-stateless-sdk/agenta/client/backend/client.py new file mode 100644 index 0000000000..9fc9784d8a --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/client.py @@ -0,0 +1,3275 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +import httpx +from .core.client_wrapper import SyncClientWrapper +from .observability.client import ObservabilityClient +from .apps.client import AppsClient +from .variants.client import VariantsClient +from .evaluations.client import EvaluationsClient +from .evaluators.client import EvaluatorsClient +from .testsets.client import TestsetsClient +from .containers.client import ContainersClient +from .environments.client import EnvironmentsClient +from .bases.client import BasesClient +from .configs.client import ConfigsClient +from .observability_v_1.client import ObservabilityV1Client +from .core.request_options import RequestOptions +from .types.list_api_keys_response import ListApiKeysResponse +from .core.pydantic_utilities import parse_obj_as +from json.decoder import JSONDecodeError +from .core.api_error import ApiError +from .errors.unprocessable_entity_error import UnprocessableEntityError +from .types.http_validation_error import HttpValidationError +from .core.jsonable_encoder import jsonable_encoder +from .types.organization import Organization +from .types.organization_output import OrganizationOutput +from .types.invite_request import InviteRequest +from .core.serialization import convert_and_respect_annotation_metadata +from .types.workspace_response import WorkspaceResponse +import datetime as dt +from .types.workspace_role_response import WorkspaceRoleResponse +from .types.permission import Permission +from .core.client_wrapper import AsyncClientWrapper +from .observability.client import AsyncObservabilityClient +from .apps.client import AsyncAppsClient +from .variants.client import AsyncVariantsClient +from .evaluations.client import AsyncEvaluationsClient +from .evaluators.client import AsyncEvaluatorsClient +from .testsets.client import AsyncTestsetsClient +from .containers.client import AsyncContainersClient +from .environments.client import AsyncEnvironmentsClient +from .bases.client import AsyncBasesClient +from .configs.client 
import AsyncConfigsClient +from .observability_v_1.client import AsyncObservabilityV1Client + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class AgentaApi: + """ + Use this class to access the different functions within the SDK. You can instantiate any number of clients with different configuration that will propagate to these functions. + + Parameters + ---------- + base_url : str + The base url to use for requests from the client. + + api_key : str + timeout : typing.Optional[float] + The timeout to be used, in seconds, for requests. By default the timeout is 60 seconds, unless a custom httpx client is used, in which case this default is not enforced. + + follow_redirects : typing.Optional[bool] + Whether the default httpx client follows redirects or not, this is irrelevant if a custom httpx client is passed in. + + httpx_client : typing.Optional[httpx.Client] + The httpx client to use for making requests, a preconfigured client is used by default, however this is useful should you want to pass in any custom httpx configuration. 
+ + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + """ + + def __init__( + self, + *, + base_url: str, + api_key: str, + timeout: typing.Optional[float] = None, + follow_redirects: typing.Optional[bool] = True, + httpx_client: typing.Optional[httpx.Client] = None, + ): + _defaulted_timeout = ( + timeout if timeout is not None else 60 if httpx_client is None else None + ) + self._client_wrapper = SyncClientWrapper( + base_url=base_url, + api_key=api_key, + httpx_client=httpx_client + if httpx_client is not None + else httpx.Client( + timeout=_defaulted_timeout, follow_redirects=follow_redirects + ) + if follow_redirects is not None + else httpx.Client(timeout=_defaulted_timeout), + timeout=_defaulted_timeout, + ) + self.observability = ObservabilityClient(client_wrapper=self._client_wrapper) + self.apps = AppsClient(client_wrapper=self._client_wrapper) + self.variants = VariantsClient(client_wrapper=self._client_wrapper) + self.evaluations = EvaluationsClient(client_wrapper=self._client_wrapper) + self.evaluators = EvaluatorsClient(client_wrapper=self._client_wrapper) + self.testsets = TestsetsClient(client_wrapper=self._client_wrapper) + self.containers = ContainersClient(client_wrapper=self._client_wrapper) + self.environments = EnvironmentsClient(client_wrapper=self._client_wrapper) + self.bases = BasesClient(client_wrapper=self._client_wrapper) + self.configs = ConfigsClient(client_wrapper=self._client_wrapper) + self.observability_v_1 = ObservabilityV1Client( + client_wrapper=self._client_wrapper + ) + + def list_api_keys( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[ListApiKeysResponse]: + """ + List all API keys associated with the authenticated user. + + Args: + request (Request): The incoming request object. + + Returns: + List[ListAPIKeysResponse]: A list of API Keys associated with the user. 
+ + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[ListApiKeysResponse] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.list_api_keys() + """ + _response = self._client_wrapper.httpx_client.request( + "keys", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[ListApiKeysResponse], + parse_obj_as( + type_=typing.List[ListApiKeysResponse], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create_api_key( + self, + *, + workspace_id: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> str: + """ + Creates an API key for a user. + + Args: + request (Request): The request object containing the user ID in the request state. + + Returns: + str: The created API key. + + Parameters + ---------- + workspace_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + str + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.create_api_key( + workspace_id="workspace_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "keys", + method="POST", + params={ + "workspace_id": workspace_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + str, + parse_obj_as( + type_=str, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_api_key( + self, + key_prefix: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Dict[str, typing.Optional[typing.Any]]: + """ + Delete an API key with the given key prefix for the authenticated user. + + Args: + key_prefix (str): The prefix of the API key to be deleted. + request (Request): The incoming request object. + + Returns: + dict: A dictionary containing a success message upon successful deletion. + + Raises: + HTTPException: If the API key is not found or does not belong to the user. + + Parameters + ---------- + key_prefix : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Dict[str, typing.Optional[typing.Any]] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.delete_api_key( + key_prefix="key_prefix", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"keys/{jsonable_encoder(key_prefix)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Dict[str, typing.Optional[typing.Any]], + parse_obj_as( + type_=typing.Dict[str, typing.Optional[typing.Any]], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def validate_api_key( + self, + key_prefix: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> bool: + """ + This Function is called by the CLI and is used to validate an API key provided by a user in agenta init setup. + Returns: + bool: True. If the request reaches this point, the API key is valid. + + Parameters + ---------- + key_prefix : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + bool + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.validate_api_key( + key_prefix="key_prefix", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"keys/{jsonable_encoder(key_prefix)}/validate", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + bool, + parse_obj_as( + type_=bool, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def list_organizations( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[Organization]: + """ + Returns a list of organizations associated with the user's session. + + Args: + stoken_session (SessionContainer): The user's session token. + + Returns: + list[Organization]: A list of organizations associated with the user's session. + + Raises: + HTTPException: If there is an error retrieving the organizations from the database. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[Organization] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.list_organizations() + """ + _response = self._client_wrapper.httpx_client.request( + "organizations", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[Organization], + parse_obj_as( + type_=typing.List[Organization], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create_organization( + self, + *, + name: str, + owner: str, + description: typing.Optional[str] = OMIT, + type: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + name : str + + owner : str + + description : typing.Optional[str] + + type : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.create_organization( + name="name", + owner="owner", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "organizations", + method="POST", + json={ + "name": name, + "owner": owner, + "description": description, + "type": type, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_own_org( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> OrganizationOutput: + """ + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + OrganizationOutput + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.get_own_org() + """ + _response = self._client_wrapper.httpx_client.request( + "organizations/own", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + OrganizationOutput, + parse_obj_as( + type_=OrganizationOutput, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def fetch_organization_details( + self, org_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: + """ + Get an organization's details. + + Raises: + HTTPException: _description_ + Permission Denied + + Returns: + OrganizationDB Instance + + Parameters + ---------- + org_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.fetch_organization_details( + org_id="org_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_organization( + self, + org_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + updated_at: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + org_id : str + + name : typing.Optional[str] + + description : typing.Optional[str] + + updated_at : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.update_organization( + org_id="org_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}", + method="PUT", + json={ + "name": name, + "description": description, + "updated_at": updated_at, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def invite_user_to_workspace( + self, + org_id: str, + workspace_id: str, + *, + request: typing.Sequence[InviteRequest], + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + org_id : str + + workspace_id : str + + request : typing.Sequence[InviteRequest] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi, InviteRequest + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.invite_user_to_workspace( + org_id="org_id", + workspace_id="workspace_id", + request=[ + InviteRequest( + email="email", + roles=["roles"], + ) + ], + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}/workspaces/{jsonable_encoder(workspace_id)}/invite", + method="POST", + json=convert_and_respect_annotation_metadata( + object_=request, + annotation=typing.Sequence[InviteRequest], + direction="write", + ), + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def resend_invitation( + self, + org_id: str, + workspace_id: str, + *, + email: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Resend an invitation to a user to an Organization. 
+ + Raises: + HTTPException: _description_; status_code: 500 + HTTPException: Invitation not found or has expired; status_code: 400 + HTTPException: You already belong to this organization; status_code: 400 + + Returns: + JSONResponse: Resent invitation to user; status_code: 200 + + Parameters + ---------- + org_id : str + + workspace_id : str + + email : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.resend_invitation( + org_id="org_id", + workspace_id="workspace_id", + email="email", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}/workspaces/{jsonable_encoder(workspace_id)}/invite/resend", + method="POST", + json={ + "email": email, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def accept_invitation( + self, + org_id: str, + workspace_id: str, + *, + project_id: str, + token: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Accept an invitation to a workspace. 
+ + Raises: + HTTPException: _description_; status_code: 500 + HTTPException: Invitation not found or has expired; status_code: 400 + HTTPException: You already belong to this organization; status_code: 400 + + Returns: + JSONResponse: Accepted invitation to workspace; status_code: 200 + + Parameters + ---------- + org_id : str + + workspace_id : str + + project_id : str + + token : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.accept_invitation( + org_id="org_id", + workspace_id="workspace_id", + project_id="project_id", + token="token", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}/workspaces/{jsonable_encoder(workspace_id)}/invite/accept", + method="POST", + params={ + "project_id": project_id, + }, + json={ + "token": token, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create_workspace( + self, + org_id: str, + *, + name: str, + description: typing.Optional[str] = OMIT, + type: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> 
WorkspaceResponse: + """ + Parameters + ---------- + org_id : str + + name : str + + description : typing.Optional[str] + + type : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + WorkspaceResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.create_workspace( + org_id="org_id", + name="name", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}/workspaces", + method="POST", + json={ + "name": name, + "description": description, + "type": type, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + WorkspaceResponse, + parse_obj_as( + type_=WorkspaceResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_workspace( + self, + org_id: str, + workspace_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + updated_at: typing.Optional[dt.datetime] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> WorkspaceResponse: + """ + Parameters + ---------- + org_id : str + + workspace_id : str + + name : typing.Optional[str] + + description : typing.Optional[str] + + updated_at : typing.Optional[dt.datetime] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + WorkspaceResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.update_workspace( + org_id="org_id", + workspace_id="workspace_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}/workspaces/{jsonable_encoder(workspace_id)}", + method="PUT", + json={ + "name": name, + "description": description, + "updated_at": updated_at, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + WorkspaceResponse, + parse_obj_as( + type_=WorkspaceResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_all_workspace_roles( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[WorkspaceRoleResponse]: + """ + Get all workspace roles. + + Returns a list of all available workspace roles. + + Returns: + List[WorkspaceRoleResponse]: A list of WorkspaceRole objects representing the available workspace roles. + + Raises: + HTTPException: If an error occurs while retrieving the workspace roles. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[WorkspaceRoleResponse] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.get_all_workspace_roles() + """ + _response = self._client_wrapper.httpx_client.request( + "workspaces/roles", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[WorkspaceRoleResponse], + parse_obj_as( + type_=typing.List[WorkspaceRoleResponse], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_all_workspace_permissions( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[Permission]: + """ + Get all workspace permissions. + + Returns a list of all available workspace permissions. + + Returns: + List[Permission]: A list of Permission objects representing the available workspace permissions. + + Raises: + HTTPException: If there is an error retrieving the workspace permissions. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[Permission] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.get_all_workspace_permissions() + """ + _response = self._client_wrapper.httpx_client.request( + "workspaces/permissions", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[Permission], + parse_obj_as( + type_=typing.List[Permission], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def assign_role_to_user( + self, + workspace_id: str, + *, + email: str, + organization_id: str, + role: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> bool: + """ + Assigns a role to a user in a workspace. + + Args: + payload (UserRole): The payload containing the organization id, user email, and role to assign. + workspace_id (str): The ID of the workspace. + request (Request): The FastAPI request object. + + Returns: + bool: True if the role was successfully assigned, False otherwise. + + Raises: + HTTPException: If the user does not have permission to perform this action. + HTTPException: If there is an error assigning the role to the user. + + Parameters + ---------- + workspace_id : str + + email : str + + organization_id : str + + role : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + bool + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.assign_role_to_user( + workspace_id="workspace_id", + email="email", + organization_id="organization_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"workspaces/{jsonable_encoder(workspace_id)}/roles", + method="POST", + json={ + "email": email, + "organization_id": organization_id, + "role": role, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + bool, + parse_obj_as( + type_=bool, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def unassign_role_from_user( + self, + workspace_id: str, + *, + email: str, + org_id: str, + role: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Delete a role assignment from a user in a workspace. + + Args: + workspace_id (str): The ID of the workspace. + email (str): The email of the user to remove the role from. + org_id (str): The ID of the organization. + role (str): The role to remove from the user. + request (Request): The FastAPI request object. + + Returns: + bool: True if the role assignment was successfully deleted. + + Raises: + HTTPException: If there is an error in the request or the user does not have permission to perform the action. + HTTPException: If there is an error in updating the user's roles. 
+ + Parameters + ---------- + workspace_id : str + + email : str + + org_id : str + + role : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.unassign_role_from_user( + workspace_id="workspace_id", + email="email", + org_id="org_id", + role="role", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"workspaces/{jsonable_encoder(workspace_id)}/roles", + method="DELETE", + params={ + "email": email, + "org_id": org_id, + "role": role, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def remove_user_from_workspace( + self, + workspace_id: str, + *, + org_id: str, + email: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> WorkspaceResponse: + """ + Remove a user from a workspace. + + Parameters: + + - payload (UserRole): The payload containing the user email and organization ID. + - workspace_id (str): The ID of the workspace. + - request (Request): The FastAPI request object. + + Returns: + + - WorkspaceResponse: The updated workspace. + + Raises: + + - HTTPException: If the user does not have permission to perform this action. 
+ - HTTPException: If there is an error during the removal process. + + Parameters + ---------- + workspace_id : str + + org_id : str + + email : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + WorkspaceResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.remove_user_from_workspace( + workspace_id="workspace_id", + org_id="org_id", + email="email", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"workspaces/{jsonable_encoder(workspace_id)}/users", + method="DELETE", + params={ + "org_id": org_id, + "email": email, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + WorkspaceResponse, + parse_obj_as( + type_=WorkspaceResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def health_check( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.health_check() + """ + _response = self._client_wrapper.httpx_client.request( + "health", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def fetch_user_profile( + self, + *, + user_id: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + user_id : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.fetch_user_profile() + """ + _response = self._client_wrapper.httpx_client.request( + "profile", + method="GET", + params={ + "user_id": user_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncAgentaApi: + """ + Use this class to access the different functions within the SDK. You can instantiate any number of clients with different configuration that will propagate to these functions. + + Parameters + ---------- + base_url : str + The base url to use for requests from the client. + + api_key : str + timeout : typing.Optional[float] + The timeout to be used, in seconds, for requests. By default the timeout is 60 seconds, unless a custom httpx client is used, in which case this default is not enforced. + + follow_redirects : typing.Optional[bool] + Whether the default httpx client follows redirects or not, this is irrelevant if a custom httpx client is passed in. + + httpx_client : typing.Optional[httpx.AsyncClient] + The httpx client to use for making requests, a preconfigured client is used by default, however this is useful should you want to pass in any custom httpx configuration. 
+ + Examples + -------- + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + """ + + def __init__( + self, + *, + base_url: str, + api_key: str, + timeout: typing.Optional[float] = None, + follow_redirects: typing.Optional[bool] = True, + httpx_client: typing.Optional[httpx.AsyncClient] = None, + ): + _defaulted_timeout = ( + timeout if timeout is not None else 60 if httpx_client is None else None + ) + self._client_wrapper = AsyncClientWrapper( + base_url=base_url, + api_key=api_key, + httpx_client=httpx_client + if httpx_client is not None + else httpx.AsyncClient( + timeout=_defaulted_timeout, follow_redirects=follow_redirects + ) + if follow_redirects is not None + else httpx.AsyncClient(timeout=_defaulted_timeout), + timeout=_defaulted_timeout, + ) + self.observability = AsyncObservabilityClient( + client_wrapper=self._client_wrapper + ) + self.apps = AsyncAppsClient(client_wrapper=self._client_wrapper) + self.variants = AsyncVariantsClient(client_wrapper=self._client_wrapper) + self.evaluations = AsyncEvaluationsClient(client_wrapper=self._client_wrapper) + self.evaluators = AsyncEvaluatorsClient(client_wrapper=self._client_wrapper) + self.testsets = AsyncTestsetsClient(client_wrapper=self._client_wrapper) + self.containers = AsyncContainersClient(client_wrapper=self._client_wrapper) + self.environments = AsyncEnvironmentsClient(client_wrapper=self._client_wrapper) + self.bases = AsyncBasesClient(client_wrapper=self._client_wrapper) + self.configs = AsyncConfigsClient(client_wrapper=self._client_wrapper) + self.observability_v_1 = AsyncObservabilityV1Client( + client_wrapper=self._client_wrapper + ) + + async def list_api_keys( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[ListApiKeysResponse]: + """ + List all API keys associated with the authenticated user. + + Args: + request (Request): The incoming request object. 
+ + Returns: + List[ListAPIKeysResponse]: A list of API Keys associated with the user. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[ListApiKeysResponse] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.list_api_keys() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "keys", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[ListApiKeysResponse], + parse_obj_as( + type_=typing.List[ListApiKeysResponse], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def create_api_key( + self, + *, + workspace_id: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> str: + """ + Creates an API key for a user. + + Args: + request (Request): The request object containing the user ID in the request state. + + Returns: + str: The created API key. + + Parameters + ---------- + workspace_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + str + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.create_api_key( + workspace_id="workspace_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "keys", + method="POST", + params={ + "workspace_id": workspace_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + str, + parse_obj_as( + type_=str, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_api_key( + self, + key_prefix: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Dict[str, typing.Optional[typing.Any]]: + """ + Delete an API key with the given key prefix for the authenticated user. + + Args: + key_prefix (str): The prefix of the API key to be deleted. + request (Request): The incoming request object. + + Returns: + dict: A dictionary containing a success message upon successful deletion. + + Raises: + HTTPException: If the API key is not found or does not belong to the user. + + Parameters + ---------- + key_prefix : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Dict[str, typing.Optional[typing.Any]] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.delete_api_key( + key_prefix="key_prefix", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"keys/{jsonable_encoder(key_prefix)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Dict[str, typing.Optional[typing.Any]], + parse_obj_as( + type_=typing.Dict[str, typing.Optional[typing.Any]], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def validate_api_key( + self, + key_prefix: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> bool: + """ + This Function is called by the CLI and is used to validate an API key provided by a user in agenta init setup. + Returns: + bool: True. If the request reaches this point, the API key is valid. + + Parameters + ---------- + key_prefix : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + bool + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.validate_api_key( + key_prefix="key_prefix", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"keys/{jsonable_encoder(key_prefix)}/validate", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + bool, + parse_obj_as( + type_=bool, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def list_organizations( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[Organization]: + """ + Returns a list of organizations associated with the user's session. + + Args: + stoken_session (SessionContainer): The user's session token. + + Returns: + list[Organization]: A list of organizations associated with the user's session. + + Raises: + HTTPException: If there is an error retrieving the organizations from the database. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[Organization] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.list_organizations() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "organizations", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[Organization], + parse_obj_as( + type_=typing.List[Organization], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def create_organization( + self, + *, + name: str, + owner: str, + description: typing.Optional[str] = OMIT, + type: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + name : str + + owner : str + + description : typing.Optional[str] + + type : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.create_organization( + name="name", + owner="owner", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "organizations", + method="POST", + json={ + "name": name, + "owner": owner, + "description": description, + "type": type, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_own_org( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> OrganizationOutput: + """ + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + OrganizationOutput + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.get_own_org() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "organizations/own", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + OrganizationOutput, + parse_obj_as( + type_=OrganizationOutput, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def fetch_organization_details( + self, org_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: + """ + Get an organization's details. + + Raises: + HTTPException: _description_ + Permission Denied + + Returns: + OrganizationDB Instance + + Parameters + ---------- + org_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.fetch_organization_details( + org_id="org_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_organization( + self, + org_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + updated_at: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + org_id : str + + name : typing.Optional[str] + + description : typing.Optional[str] + + updated_at : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.update_organization( + org_id="org_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}", + method="PUT", + json={ + "name": name, + "description": description, + "updated_at": updated_at, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def invite_user_to_workspace( + self, + org_id: str, + workspace_id: str, + *, + request: typing.Sequence[InviteRequest], + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + org_id : str + + workspace_id : str + + request : typing.Sequence[InviteRequest] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi, InviteRequest + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.invite_user_to_workspace( + org_id="org_id", + workspace_id="workspace_id", + request=[ + InviteRequest( + email="email", + roles=["roles"], + ) + ], + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}/workspaces/{jsonable_encoder(workspace_id)}/invite", + method="POST", + json=convert_and_respect_annotation_metadata( + object_=request, + annotation=typing.Sequence[InviteRequest], + direction="write", + ), + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def resend_invitation( + self, + org_id: str, + workspace_id: str, + *, + email: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Resend an invitation to a user to an Organization. 
+ + Raises: + HTTPException: _description_; status_code: 500 + HTTPException: Invitation not found or has expired; status_code: 400 + HTTPException: You already belong to this organization; status_code: 400 + + Returns: + JSONResponse: Resent invitation to user; status_code: 200 + + Parameters + ---------- + org_id : str + + workspace_id : str + + email : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.resend_invitation( + org_id="org_id", + workspace_id="workspace_id", + email="email", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}/workspaces/{jsonable_encoder(workspace_id)}/invite/resend", + method="POST", + json={ + "email": email, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def accept_invitation( + self, + org_id: str, + workspace_id: str, + *, + project_id: str, + token: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Accept an 
invitation to a workspace. + + Raises: + HTTPException: _description_; status_code: 500 + HTTPException: Invitation not found or has expired; status_code: 400 + HTTPException: You already belong to this organization; status_code: 400 + + Returns: + JSONResponse: Accepted invitation to workspace; status_code: 200 + + Parameters + ---------- + org_id : str + + workspace_id : str + + project_id : str + + token : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.accept_invitation( + org_id="org_id", + workspace_id="workspace_id", + project_id="project_id", + token="token", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}/workspaces/{jsonable_encoder(workspace_id)}/invite/accept", + method="POST", + params={ + "project_id": project_id, + }, + json={ + "token": token, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def create_workspace( + self, + org_id: str, + *, + name: str, + description: 
typing.Optional[str] = OMIT, + type: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> WorkspaceResponse: + """ + Parameters + ---------- + org_id : str + + name : str + + description : typing.Optional[str] + + type : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + WorkspaceResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.create_workspace( + org_id="org_id", + name="name", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}/workspaces", + method="POST", + json={ + "name": name, + "description": description, + "type": type, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + WorkspaceResponse, + parse_obj_as( + type_=WorkspaceResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_workspace( + self, + org_id: str, + workspace_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + updated_at: typing.Optional[dt.datetime] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> WorkspaceResponse: + """ + Parameters + ---------- + org_id : str + + workspace_id : str + + 
name : typing.Optional[str] + + description : typing.Optional[str] + + updated_at : typing.Optional[dt.datetime] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + WorkspaceResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.update_workspace( + org_id="org_id", + workspace_id="workspace_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"organizations/{jsonable_encoder(org_id)}/workspaces/{jsonable_encoder(workspace_id)}", + method="PUT", + json={ + "name": name, + "description": description, + "updated_at": updated_at, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + WorkspaceResponse, + parse_obj_as( + type_=WorkspaceResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_all_workspace_roles( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[WorkspaceRoleResponse]: + """ + Get all workspace roles. + + Returns a list of all available workspace roles. + + Returns: + List[WorkspaceRoleResponse]: A list of WorkspaceRole objects representing the available workspace roles. + + Raises: + HTTPException: If an error occurs while retrieving the workspace roles. 
+ + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[WorkspaceRoleResponse] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.get_all_workspace_roles() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "workspaces/roles", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[WorkspaceRoleResponse], + parse_obj_as( + type_=typing.List[WorkspaceRoleResponse], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_all_workspace_permissions( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[Permission]: + """ + Get all workspace permissions. + + Returns a list of all available workspace permissions. + + Returns: + List[Permission]: A list of Permission objects representing the available workspace permissions. + + Raises: + HTTPException: If there is an error retrieving the workspace permissions. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[Permission] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.get_all_workspace_permissions() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "workspaces/permissions", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[Permission], + parse_obj_as( + type_=typing.List[Permission], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def assign_role_to_user( + self, + workspace_id: str, + *, + email: str, + organization_id: str, + role: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> bool: + """ + Assigns a role to a user in a workspace. + + Args: + payload (UserRole): The payload containing the organization id, user email, and role to assign. + workspace_id (str): The ID of the workspace. + request (Request): The FastAPI request object. + + Returns: + bool: True if the role was successfully assigned, False otherwise. + + Raises: + HTTPException: If the user does not have permission to perform this action. + HTTPException: If there is an error assigning the role to the user. + + Parameters + ---------- + workspace_id : str + + email : str + + organization_id : str + + role : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + bool + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.assign_role_to_user( + workspace_id="workspace_id", + email="email", + organization_id="organization_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"workspaces/{jsonable_encoder(workspace_id)}/roles", + method="POST", + json={ + "email": email, + "organization_id": organization_id, + "role": role, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + bool, + parse_obj_as( + type_=bool, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def unassign_role_from_user( + self, + workspace_id: str, + *, + email: str, + org_id: str, + role: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Delete a role assignment from a user in a workspace. + + Args: + workspace_id (str): The ID of the workspace. + email (str): The email of the user to remove the role from. + org_id (str): The ID of the organization. + role (str): The role to remove from the user. + request (Request): The FastAPI request object. + + Returns: + bool: True if the role assignment was successfully deleted. 
+ + Raises: + HTTPException: If there is an error in the request or the user does not have permission to perform the action. + HTTPException: If there is an error in updating the user's roles. + + Parameters + ---------- + workspace_id : str + + email : str + + org_id : str + + role : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.unassign_role_from_user( + workspace_id="workspace_id", + email="email", + org_id="org_id", + role="role", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"workspaces/{jsonable_encoder(workspace_id)}/roles", + method="DELETE", + params={ + "email": email, + "org_id": org_id, + "role": role, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def remove_user_from_workspace( + self, + workspace_id: str, + *, + org_id: str, + email: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> WorkspaceResponse: + """ + Remove a user from a workspace. 
+ + Parameters: + + - payload (UserRole): The payload containing the user email and organization ID. + - workspace_id (str): The ID of the workspace. + - request (Request): The FastAPI request object. + + Returns: + + - WorkspaceResponse: The updated workspace. + + Raises: + + - HTTPException: If the user does not have permission to perform this action. + - HTTPException: If there is an error during the removal process. + + Parameters + ---------- + workspace_id : str + + org_id : str + + email : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + WorkspaceResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.remove_user_from_workspace( + workspace_id="workspace_id", + org_id="org_id", + email="email", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"workspaces/{jsonable_encoder(workspace_id)}/users", + method="DELETE", + params={ + "org_id": org_id, + "email": email, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + WorkspaceResponse, + parse_obj_as( + type_=WorkspaceResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def health_check( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: + """ + 
Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.health_check() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "health", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def fetch_user_profile( + self, + *, + user_id: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + user_id : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.fetch_user_profile() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "profile", + method="GET", + params={ + "user_id": user_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/services/completion-stateless-sdk/agenta/client/backend/configs/__init__.py b/services/completion-stateless-sdk/agenta/client/backend/configs/__init__.py new file mode 100644 index 0000000000..67a41b2742 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/configs/__init__.py @@ -0,0 +1 @@ +# This file was auto-generated by Fern from our API Definition. diff --git a/services/completion-stateless-sdk/agenta/client/backend/configs/client.py b/services/completion-stateless-sdk/agenta/client/backend/configs/client.py new file mode 100644 index 0000000000..e034ff37b0 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/configs/client.py @@ -0,0 +1,598 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +from ..core.client_wrapper import SyncClientWrapper +from ..core.request_options import RequestOptions +from ..types.get_config_response import GetConfigResponse +from ..core.pydantic_utilities import parse_obj_as +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..core.jsonable_encoder import jsonable_encoder +from ..core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class ConfigsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def get_config( + self, + *, + base_id: str, + config_name: typing.Optional[str] = None, + environment_name: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> GetConfigResponse: + """ + Parameters + ---------- + base_id : str + + config_name : typing.Optional[str] + + environment_name : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + GetConfigResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.configs.get_config( + base_id="base_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "configs", + method="GET", + params={ + "base_id": base_id, + "config_name": config_name, + "environment_name": environment_name, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetConfigResponse, + parse_obj_as( + type_=GetConfigResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def save_config( + self, + *, + base_id: str, + config_name: str, + parameters: typing.Dict[str, typing.Optional[typing.Any]], + overwrite: bool, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + base_id : str + + config_name : str + + parameters : typing.Dict[str, typing.Optional[typing.Any]] + + overwrite : bool + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.configs.save_config( + base_id="base_id", + config_name="config_name", + parameters={"key": "value"}, + overwrite=True, + ) + """ + _response = self._client_wrapper.httpx_client.request( + "configs", + method="POST", + json={ + "base_id": base_id, + "config_name": config_name, + "parameters": parameters, + "overwrite": overwrite, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_config_deployment_revision( + self, + deployment_revision_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + deployment_revision_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.configs.get_config_deployment_revision( + deployment_revision_id="deployment_revision_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"configs/deployment/{jsonable_encoder(deployment_revision_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def revert_deployment_revision( + self, + deployment_revision_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + deployment_revision_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.configs.revert_deployment_revision( + deployment_revision_id="deployment_revision_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"configs/deployment/{jsonable_encoder(deployment_revision_id)}/revert", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncConfigsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def get_config( + self, + *, + base_id: str, + config_name: typing.Optional[str] = None, + environment_name: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> GetConfigResponse: + """ + Parameters + ---------- + base_id : str + + config_name : typing.Optional[str] + + environment_name : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + GetConfigResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.configs.get_config( + base_id="base_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "configs", + method="GET", + params={ + "base_id": base_id, + "config_name": config_name, + "environment_name": environment_name, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GetConfigResponse, + parse_obj_as( + type_=GetConfigResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def save_config( + self, + *, + base_id: str, + config_name: str, + parameters: typing.Dict[str, typing.Optional[typing.Any]], + overwrite: bool, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + base_id : str + + config_name : str + + parameters : typing.Dict[str, typing.Optional[typing.Any]] + + overwrite : bool + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.configs.save_config( + base_id="base_id", + config_name="config_name", + parameters={"key": "value"}, + overwrite=True, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "configs", + method="POST", + json={ + "base_id": base_id, + "config_name": config_name, + "parameters": parameters, + "overwrite": overwrite, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_config_deployment_revision( + self, + deployment_revision_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + deployment_revision_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.configs.get_config_deployment_revision( + deployment_revision_id="deployment_revision_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"configs/deployment/{jsonable_encoder(deployment_revision_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def revert_deployment_revision( + self, + deployment_revision_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + deployment_revision_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.configs.revert_deployment_revision( + deployment_revision_id="deployment_revision_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"configs/deployment/{jsonable_encoder(deployment_revision_id)}/revert", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/services/completion-stateless-sdk/agenta/client/backend/containers/__init__.py b/services/completion-stateless-sdk/agenta/client/backend/containers/__init__.py new file mode 100644 index 0000000000..3d1974f7ad --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/containers/__init__.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .types import ContainerTemplatesResponse + +__all__ = ["ContainerTemplatesResponse"] diff --git a/services/completion-stateless-sdk/agenta/client/backend/containers/client.py b/services/completion-stateless-sdk/agenta/client/backend/containers/client.py new file mode 100644 index 0000000000..c7180c0a4b --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/containers/client.py @@ -0,0 +1,642 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ..core.client_wrapper import SyncClientWrapper +from .. import core +from ..core.request_options import RequestOptions +from ..types.image import Image +from ..core.pydantic_utilities import parse_obj_as +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from .types.container_templates_response import ContainerTemplatesResponse +from ..types.uri import Uri +from ..core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class ContainersClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def build_image( + self, + *, + app_id: str, + base_name: str, + tar_file: core.File, + request_options: typing.Optional[RequestOptions] = None, + ) -> Image: + """ + Builds a Docker image from a tar file containing the application code. + + Args: + app_id (str): The ID of the application to build the image for. + base_name (str): The base name of the image to build. + tar_file (UploadFile): The tar file containing the application code. + stoken_session (SessionContainer): The session container for the user making the request. + + Returns: + Image: The Docker image that was built. 
+ + Parameters + ---------- + app_id : str + + base_name : str + + tar_file : core.File + See core.File for more documentation + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + Image + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.containers.build_image( + app_id="app_id", + base_name="base_name", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "containers/build_image", + method="POST", + params={ + "app_id": app_id, + "base_name": base_name, + }, + data={}, + files={ + "tar_file": tar_file, + }, + request_options=( + {**request_options, "timeout_in_seconds": 600} + if request_options + else {"timeout_in_seconds": 600} + ), + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + Image, + parse_obj_as( + type_=Image, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def restart_container( + self, + *, + variant_id: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Dict[str, typing.Optional[typing.Any]]: + """ + Restart docker container. + + Args: + payload (RestartAppContainer) -- the required data (app_name and variant_name) + + Parameters + ---------- + variant_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Dict[str, typing.Optional[typing.Any]] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.containers.restart_container( + variant_id="variant_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "containers/restart_container", + method="POST", + json={ + "variant_id": variant_id, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Dict[str, typing.Optional[typing.Any]], + parse_obj_as( + type_=typing.Dict[str, typing.Optional[typing.Any]], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def container_templates( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> ContainerTemplatesResponse: + """ + Returns a list of templates available for creating new containers. + + Parameters: + stoken_session (SessionContainer): The session container for the user. + + Returns: + + Union[List[Template], str]: A list of templates or an error message. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + ContainerTemplatesResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.containers.container_templates() + """ + _response = self._client_wrapper.httpx_client.request( + "containers/templates", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ContainerTemplatesResponse, + parse_obj_as( + type_=ContainerTemplatesResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def construct_app_container_url( + self, + *, + base_id: typing.Optional[str] = None, + variant_id: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> Uri: + """ + Constructs the URL for an app container based on the provided base_id or variant_id. + + Args: + base_id (Optional[str]): The ID of the base to use for the app container. + variant_id (Optional[str]): The ID of the variant to use for the app container. + request (Request): The request object. + + Returns: + URI: The URI for the app container. + + Raises: + HTTPException: If the base or variant cannot be found or the user does not have access. + + Parameters + ---------- + base_id : typing.Optional[str] + + variant_id : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + Uri + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.containers.construct_app_container_url() + """ + _response = self._client_wrapper.httpx_client.request( + "containers/container_url", + method="GET", + params={ + "base_id": base_id, + "variant_id": variant_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + Uri, + parse_obj_as( + type_=Uri, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncContainersClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def build_image( + self, + *, + app_id: str, + base_name: str, + tar_file: core.File, + request_options: typing.Optional[RequestOptions] = None, + ) -> Image: + """ + Builds a Docker image from a tar file containing the application code. + + Args: + app_id (str): The ID of the application to build the image for. + base_name (str): The base name of the image to build. + tar_file (UploadFile): The tar file containing the application code. + stoken_session (SessionContainer): The session container for the user making the request. + + Returns: + Image: The Docker image that was built. + + Parameters + ---------- + app_id : str + + base_name : str + + tar_file : core.File + See core.File for more documentation + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + Image + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.containers.build_image( + app_id="app_id", + base_name="base_name", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "containers/build_image", + method="POST", + params={ + "app_id": app_id, + "base_name": base_name, + }, + data={}, + files={ + "tar_file": tar_file, + }, + request_options=( + {**request_options, "timeout_in_seconds": 600} + if request_options + else {"timeout_in_seconds": 600} + ), + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + Image, + parse_obj_as( + type_=Image, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def restart_container( + self, + *, + variant_id: str, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Dict[str, typing.Optional[typing.Any]]: + """ + Restart docker container. + + Args: + payload (RestartAppContainer) -- the required data (app_name and variant_name) + + Parameters + ---------- + variant_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Dict[str, typing.Optional[typing.Any]] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.containers.restart_container( + variant_id="variant_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "containers/restart_container", + method="POST", + json={ + "variant_id": variant_id, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Dict[str, typing.Optional[typing.Any]], + parse_obj_as( + type_=typing.Dict[str, typing.Optional[typing.Any]], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def container_templates( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> ContainerTemplatesResponse: + """ + Returns a list of templates available for creating new containers. + + Parameters: + stoken_session (SessionContainer): The session container for the user. + + Returns: + + Union[List[Template], str]: A list of templates or an error message. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + ContainerTemplatesResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.containers.container_templates() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "containers/templates", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ContainerTemplatesResponse, + parse_obj_as( + type_=ContainerTemplatesResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def construct_app_container_url( + self, + *, + base_id: typing.Optional[str] = None, + variant_id: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> Uri: + """ + Constructs the URL for an app container based on the provided base_id or variant_id. + + Args: + base_id (Optional[str]): The ID of the base to use for the app container. + variant_id (Optional[str]): The ID of the variant to use for the app container. + request (Request): The request object. + + Returns: + URI: The URI for the app container. + + Raises: + HTTPException: If the base or variant cannot be found or the user does not have access. + + Parameters + ---------- + base_id : typing.Optional[str] + + variant_id : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + Uri + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.containers.construct_app_container_url() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "containers/container_url", + method="GET", + params={ + "base_id": base_id, + "variant_id": variant_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + Uri, + parse_obj_as( + type_=Uri, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/services/completion-stateless-sdk/agenta/client/backend/containers/types/__init__.py b/services/completion-stateless-sdk/agenta/client/backend/containers/types/__init__.py new file mode 100644 index 0000000000..b68c5ab25e --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/containers/types/__init__.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .container_templates_response import ContainerTemplatesResponse + +__all__ = ["ContainerTemplatesResponse"] diff --git a/services/completion-stateless-sdk/agenta/client/backend/containers/types/container_templates_response.py b/services/completion-stateless-sdk/agenta/client/backend/containers/types/container_templates_response.py new file mode 100644 index 0000000000..27177d4d0a --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/containers/types/container_templates_response.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...types.template import Template + +ContainerTemplatesResponse = typing.Union[typing.List[Template], str] diff --git a/services/completion-stateless-sdk/agenta/client/backend/core/__init__.py b/services/completion-stateless-sdk/agenta/client/backend/core/__init__.py new file mode 100644 index 0000000000..f03aecbfe1 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/core/__init__.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .api_error import ApiError +from .client_wrapper import AsyncClientWrapper, BaseClientWrapper, SyncClientWrapper +from .datetime_utils import serialize_datetime +from .file import File, convert_file_dict_to_httpx_tuples, with_content_type +from .http_client import AsyncHttpClient, HttpClient +from .jsonable_encoder import jsonable_encoder +from .pydantic_utilities import ( + IS_PYDANTIC_V2, + UniversalBaseModel, + UniversalRootModel, + parse_obj_as, + universal_field_validator, + universal_root_validator, + update_forward_refs, +) +from .query_encoder import encode_query +from .remove_none_from_dict import remove_none_from_dict +from .request_options import RequestOptions +from .serialization import FieldMetadata, convert_and_respect_annotation_metadata + +__all__ = [ + "ApiError", + "AsyncClientWrapper", + "AsyncHttpClient", + "BaseClientWrapper", + "FieldMetadata", + "File", + "HttpClient", + "IS_PYDANTIC_V2", + "RequestOptions", + "SyncClientWrapper", + "UniversalBaseModel", + "UniversalRootModel", + "convert_and_respect_annotation_metadata", + "convert_file_dict_to_httpx_tuples", + "encode_query", + "jsonable_encoder", + "parse_obj_as", + "remove_none_from_dict", + "serialize_datetime", + "universal_field_validator", + "universal_root_validator", + "update_forward_refs", + "with_content_type", +] diff --git a/services/completion-stateless-sdk/agenta/client/backend/core/api_error.py b/services/completion-stateless-sdk/agenta/client/backend/core/api_error.py new file mode 100644 index 0000000000..da734b5806 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/core/api_error.py @@ -0,0 +1,17 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + + +class ApiError(Exception): + status_code: typing.Optional[int] + body: typing.Any + + def __init__( + self, *, status_code: typing.Optional[int] = None, body: typing.Any = None + ): + self.status_code = status_code + self.body = body + + def __str__(self) -> str: + return f"status_code: {self.status_code}, body: {self.body}" diff --git a/services/completion-stateless-sdk/agenta/client/backend/core/client_wrapper.py b/services/completion-stateless-sdk/agenta/client/backend/core/client_wrapper.py new file mode 100644 index 0000000000..3c2a647bb4 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/core/client_wrapper.py @@ -0,0 +1,64 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +import httpx +from .http_client import HttpClient +from .http_client import AsyncHttpClient + + +class BaseClientWrapper: + def __init__( + self, *, api_key: str, base_url: str, timeout: typing.Optional[float] = None + ): + self.api_key = api_key + self._base_url = base_url + self._timeout = timeout + + def get_headers(self) -> typing.Dict[str, str]: + headers: typing.Dict[str, str] = { + "X-Fern-Language": "Python", + } + headers["Authorization"] = self.api_key + return headers + + def get_base_url(self) -> str: + return self._base_url + + def get_timeout(self) -> typing.Optional[float]: + return self._timeout + + +class SyncClientWrapper(BaseClientWrapper): + def __init__( + self, + *, + api_key: str, + base_url: str, + timeout: typing.Optional[float] = None, + httpx_client: httpx.Client, + ): + super().__init__(api_key=api_key, base_url=base_url, timeout=timeout) + self.httpx_client = HttpClient( + httpx_client=httpx_client, + base_headers=self.get_headers, + base_timeout=self.get_timeout, + base_url=self.get_base_url, + ) + + +class AsyncClientWrapper(BaseClientWrapper): + def __init__( + self, + *, + api_key: str, + base_url: str, + timeout: typing.Optional[float] = None, + httpx_client: 
httpx.AsyncClient, + ): + super().__init__(api_key=api_key, base_url=base_url, timeout=timeout) + self.httpx_client = AsyncHttpClient( + httpx_client=httpx_client, + base_headers=self.get_headers, + base_timeout=self.get_timeout, + base_url=self.get_base_url, + ) diff --git a/services/completion-stateless-sdk/agenta/client/backend/core/datetime_utils.py b/services/completion-stateless-sdk/agenta/client/backend/core/datetime_utils.py new file mode 100644 index 0000000000..47344e9d9c --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/core/datetime_utils.py @@ -0,0 +1,30 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt + + +def serialize_datetime(v: dt.datetime) -> str: + """ + Serialize a datetime including timezone info. + + Uses the timezone info provided if present, otherwise uses the current runtime's timezone info. + + UTC datetimes end in "Z" while all other timezones are represented as offset from UTC, e.g. +05:00. + """ + + def _serialize_zoned_datetime(v: dt.datetime) -> str: + if v.tzinfo is not None and v.tzinfo.tzname(None) == dt.timezone.utc.tzname( + None + ): + # UTC is a special case where we use "Z" at the end instead of "+00:00" + return v.isoformat().replace("+00:00", "Z") + else: + # Delegate to the typical +/- offset format + return v.isoformat() + + if v.tzinfo is not None: + return _serialize_zoned_datetime(v) + else: + local_tz = dt.datetime.now().astimezone().tzinfo + localized_dt = v.replace(tzinfo=local_tz) + return _serialize_zoned_datetime(localized_dt) diff --git a/services/completion-stateless-sdk/agenta/client/backend/core/file.py b/services/completion-stateless-sdk/agenta/client/backend/core/file.py new file mode 100644 index 0000000000..a9623d336a --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/core/file.py @@ -0,0 +1,65 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from typing import IO, Dict, List, Mapping, Optional, Tuple, Union, cast + +# File typing inspired by the flexibility of types within the httpx library +# https://github.com/encode/httpx/blob/master/httpx/_types.py +FileContent = Union[IO[bytes], bytes, str] +File = Union[ + # file (or bytes) + FileContent, + # (filename, file (or bytes)) + Tuple[Optional[str], FileContent], + # (filename, file (or bytes), content_type) + Tuple[Optional[str], FileContent, Optional[str]], + # (filename, file (or bytes), content_type, headers) + Tuple[ + Optional[str], + FileContent, + Optional[str], + Mapping[str, str], + ], +] + + +def convert_file_dict_to_httpx_tuples( + d: Dict[str, Union[File, List[File]]], +) -> List[Tuple[str, File]]: + """ + The format we use is a list of tuples, where the first element is the + name of the file and the second is the file object. Typically HTTPX wants + a dict, but to be able to send lists of files, you have to use the list + approach (which also works for non-lists) + https://github.com/encode/httpx/pull/1032 + """ + + httpx_tuples = [] + for key, file_like in d.items(): + if isinstance(file_like, list): + for file_like_item in file_like: + httpx_tuples.append((key, file_like_item)) + else: + httpx_tuples.append((key, file_like)) + return httpx_tuples + + +def with_content_type(*, file: File, content_type: str) -> File: + """ """ + if isinstance(file, tuple): + if len(file) == 2: + filename, content = cast(Tuple[Optional[str], FileContent], file) # type: ignore + return (filename, content, content_type) + elif len(file) == 3: + filename, content, _ = cast( + Tuple[Optional[str], FileContent, Optional[str]], file + ) # type: ignore + return (filename, content, content_type) + elif len(file) == 4: + filename, content, _, headers = cast( # type: ignore + Tuple[Optional[str], FileContent, Optional[str], Mapping[str, str]], + file, + ) + return (filename, content, content_type, headers) + else: + raise ValueError(f"Unexpected tuple length: 
{len(file)}") + return (None, file, content_type) diff --git a/services/completion-stateless-sdk/agenta/client/backend/core/http_client.py b/services/completion-stateless-sdk/agenta/client/backend/core/http_client.py new file mode 100644 index 0000000000..c9f1d7a59b --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/core/http_client.py @@ -0,0 +1,575 @@ +# This file was auto-generated by Fern from our API Definition. + +import asyncio +import email.utils +import json +import re +import time +import typing +import urllib.parse +from contextlib import asynccontextmanager, contextmanager +from random import random + +import httpx + +from .file import File, convert_file_dict_to_httpx_tuples +from .jsonable_encoder import jsonable_encoder +from .query_encoder import encode_query +from .remove_none_from_dict import remove_none_from_dict +from .request_options import RequestOptions + +INITIAL_RETRY_DELAY_SECONDS = 0.5 +MAX_RETRY_DELAY_SECONDS = 10 +MAX_RETRY_DELAY_SECONDS_FROM_HEADER = 30 + + +def _parse_retry_after(response_headers: httpx.Headers) -> typing.Optional[float]: + """ + This function parses the `Retry-After` header in a HTTP response and returns the number of seconds to wait. + + Inspired by the urllib3 retry implementation. + """ + retry_after_ms = response_headers.get("retry-after-ms") + if retry_after_ms is not None: + try: + return int(retry_after_ms) / 1000 if retry_after_ms > 0 else 0 + except Exception: + pass + + retry_after = response_headers.get("retry-after") + if retry_after is None: + return None + + # Attempt to parse the header as an int. + if re.match(r"^\s*[0-9]+\s*$", retry_after): + seconds = float(retry_after) + # Fallback to parsing it as a date. 
+ else: + retry_date_tuple = email.utils.parsedate_tz(retry_after) + if retry_date_tuple is None: + return None + if retry_date_tuple[9] is None: # Python 2 + # Assume UTC if no timezone was specified + # On Python2.7, parsedate_tz returns None for a timezone offset + # instead of 0 if no timezone is given, where mktime_tz treats + # a None timezone offset as local time. + retry_date_tuple = retry_date_tuple[:9] + (0,) + retry_date_tuple[10:] + + retry_date = email.utils.mktime_tz(retry_date_tuple) + seconds = retry_date - time.time() + + if seconds < 0: + seconds = 0 + + return seconds + + +def _retry_timeout(response: httpx.Response, retries: int) -> float: + """ + Determine the amount of time to wait before retrying a request. + This function begins by trying to parse a retry-after header from the response, and then proceeds to use exponential backoff + with a jitter to determine the number of seconds to wait. + """ + + # If the API asks us to wait a certain amount of time (and it's a reasonable amount), just do what it says. + retry_after = _parse_retry_after(response.headers) + if retry_after is not None and retry_after <= MAX_RETRY_DELAY_SECONDS_FROM_HEADER: + return retry_after + + # Apply exponential backoff, capped at MAX_RETRY_DELAY_SECONDS. + retry_delay = min( + INITIAL_RETRY_DELAY_SECONDS * pow(2.0, retries), MAX_RETRY_DELAY_SECONDS + ) + + # Add a randomness / jitter to the retry delay to avoid overwhelming the server with retries. 
+ timeout = retry_delay * (1 - 0.25 * random()) + return timeout if timeout >= 0 else 0 + + +def _should_retry(response: httpx.Response) -> bool: + retriable_400s = [429, 408, 409] + return response.status_code >= 500 or response.status_code in retriable_400s + + +def remove_omit_from_dict( + original: typing.Dict[str, typing.Optional[typing.Any]], + omit: typing.Optional[typing.Any], +) -> typing.Dict[str, typing.Any]: + if omit is None: + return original + new: typing.Dict[str, typing.Any] = {} + for key, value in original.items(): + if value is not omit: + new[key] = value + return new + + +def maybe_filter_request_body( + data: typing.Optional[typing.Any], + request_options: typing.Optional[RequestOptions], + omit: typing.Optional[typing.Any], +) -> typing.Optional[typing.Any]: + if data is None: + return ( + jsonable_encoder(request_options.get("additional_body_parameters", {})) + or {} + if request_options is not None + else None + ) + elif not isinstance(data, typing.Mapping): + data_content = jsonable_encoder(data) + else: + data_content = { + **(jsonable_encoder(remove_omit_from_dict(data, omit))), # type: ignore + **( + jsonable_encoder(request_options.get("additional_body_parameters", {})) + or {} + if request_options is not None + else {} + ), + } + return data_content + + +# Abstracted out for testing purposes +def get_request_body( + *, + json: typing.Optional[typing.Any], + data: typing.Optional[typing.Any], + request_options: typing.Optional[RequestOptions], + omit: typing.Optional[typing.Any], +) -> typing.Tuple[typing.Optional[typing.Any], typing.Optional[typing.Any]]: + json_body = None + data_body = None + if data is not None: + data_body = maybe_filter_request_body(data, request_options, omit) + else: + # If both data and json are None, we send json data in the event extra properties are specified + json_body = maybe_filter_request_body(json, request_options, omit) + + # If you have an empty JSON body, you should just send None + return ( + 
json_body if json_body != {} else None + ), data_body if data_body != {} else None + + +class HttpClient: + def __init__( + self, + *, + httpx_client: httpx.Client, + base_timeout: typing.Callable[[], typing.Optional[float]], + base_headers: typing.Callable[[], typing.Dict[str, str]], + base_url: typing.Optional[typing.Callable[[], str]] = None, + ): + self.base_url = base_url + self.base_timeout = base_timeout + self.base_headers = base_headers + self.httpx_client = httpx_client + + def get_base_url(self, maybe_base_url: typing.Optional[str]) -> str: + base_url = maybe_base_url + if self.base_url is not None and base_url is None: + base_url = self.base_url() + + if base_url is None: + raise ValueError( + "A base_url is required to make this request, please provide one and try again." + ) + return base_url + + def request( + self, + path: typing.Optional[str] = None, + *, + method: str, + base_url: typing.Optional[str] = None, + params: typing.Optional[typing.Dict[str, typing.Any]] = None, + json: typing.Optional[typing.Any] = None, + data: typing.Optional[typing.Any] = None, + content: typing.Optional[ + typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]] + ] = None, + files: typing.Optional[ + typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]] + ] = None, + headers: typing.Optional[typing.Dict[str, typing.Any]] = None, + request_options: typing.Optional[RequestOptions] = None, + retries: int = 0, + omit: typing.Optional[typing.Any] = None, + ) -> httpx.Response: + base_url = self.get_base_url(base_url) + timeout = ( + request_options.get("timeout_in_seconds") + if request_options is not None + and request_options.get("timeout_in_seconds") is not None + else self.base_timeout() + ) + + json_body, data_body = get_request_body( + json=json, data=data, request_options=request_options, omit=omit + ) + + response = self.httpx_client.request( + method=method, + url=urllib.parse.urljoin(f"{base_url}/", path), + 
headers=jsonable_encoder( + remove_none_from_dict( + { + **self.base_headers(), + **(headers if headers is not None else {}), + **( + request_options.get("additional_headers", {}) or {} + if request_options is not None + else {} + ), + } + ) + ), + params=encode_query( + jsonable_encoder( + remove_none_from_dict( + remove_omit_from_dict( + { + **(params if params is not None else {}), + **( + request_options.get( + "additional_query_parameters", {} + ) + or {} + if request_options is not None + else {} + ), + }, + omit, + ) + ) + ) + ), + json=json_body, + data=data_body, + content=content, + files=( + convert_file_dict_to_httpx_tuples( + remove_omit_from_dict(remove_none_from_dict(files), omit) + ) + if (files is not None and files is not omit) + else None + ), + timeout=timeout, + ) + + max_retries: int = ( + request_options.get("max_retries", 0) if request_options is not None else 0 + ) + if _should_retry(response=response): + if max_retries > retries: + time.sleep(_retry_timeout(response=response, retries=retries)) + return self.request( + path=path, + method=method, + base_url=base_url, + params=params, + json=json, + content=content, + files=files, + headers=headers, + request_options=request_options, + retries=retries + 1, + omit=omit, + ) + + return response + + @contextmanager + def stream( + self, + path: typing.Optional[str] = None, + *, + method: str, + base_url: typing.Optional[str] = None, + params: typing.Optional[typing.Dict[str, typing.Any]] = None, + json: typing.Optional[typing.Any] = None, + data: typing.Optional[typing.Any] = None, + content: typing.Optional[ + typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]] + ] = None, + files: typing.Optional[ + typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]] + ] = None, + headers: typing.Optional[typing.Dict[str, typing.Any]] = None, + request_options: typing.Optional[RequestOptions] = None, + retries: int = 0, + omit: typing.Optional[typing.Any] = None, + ) 
-> typing.Iterator[httpx.Response]: + base_url = self.get_base_url(base_url) + timeout = ( + request_options.get("timeout_in_seconds") + if request_options is not None + and request_options.get("timeout_in_seconds") is not None + else self.base_timeout() + ) + + json_body, data_body = get_request_body( + json=json, data=data, request_options=request_options, omit=omit + ) + + with self.httpx_client.stream( + method=method, + url=urllib.parse.urljoin(f"{base_url}/", path), + headers=jsonable_encoder( + remove_none_from_dict( + { + **self.base_headers(), + **(headers if headers is not None else {}), + **( + request_options.get("additional_headers", {}) + if request_options is not None + else {} + ), + } + ) + ), + params=encode_query( + jsonable_encoder( + remove_none_from_dict( + remove_omit_from_dict( + { + **(params if params is not None else {}), + **( + request_options.get( + "additional_query_parameters", {} + ) + if request_options is not None + else {} + ), + }, + omit, + ) + ) + ) + ), + json=json_body, + data=data_body, + content=content, + files=( + convert_file_dict_to_httpx_tuples( + remove_omit_from_dict(remove_none_from_dict(files), omit) + ) + if (files is not None and files is not omit) + else None + ), + timeout=timeout, + ) as stream: + yield stream + + +class AsyncHttpClient: + def __init__( + self, + *, + httpx_client: httpx.AsyncClient, + base_timeout: typing.Callable[[], typing.Optional[float]], + base_headers: typing.Callable[[], typing.Dict[str, str]], + base_url: typing.Optional[typing.Callable[[], str]] = None, + ): + self.base_url = base_url + self.base_timeout = base_timeout + self.base_headers = base_headers + self.httpx_client = httpx_client + + def get_base_url(self, maybe_base_url: typing.Optional[str]) -> str: + base_url = maybe_base_url + if self.base_url is not None and base_url is None: + base_url = self.base_url() + + if base_url is None: + raise ValueError( + "A base_url is required to make this request, please provide one and 
try again." + ) + return base_url + + async def request( + self, + path: typing.Optional[str] = None, + *, + method: str, + base_url: typing.Optional[str] = None, + params: typing.Optional[typing.Dict[str, typing.Any]] = None, + json: typing.Optional[typing.Any] = None, + data: typing.Optional[typing.Any] = None, + content: typing.Optional[ + typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]] + ] = None, + files: typing.Optional[ + typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]] + ] = None, + headers: typing.Optional[typing.Dict[str, typing.Any]] = None, + request_options: typing.Optional[RequestOptions] = None, + retries: int = 0, + omit: typing.Optional[typing.Any] = None, + ) -> httpx.Response: + base_url = self.get_base_url(base_url) + timeout = ( + request_options.get("timeout_in_seconds") + if request_options is not None + and request_options.get("timeout_in_seconds") is not None + else self.base_timeout() + ) + + json_body, data_body = get_request_body( + json=json, data=data, request_options=request_options, omit=omit + ) + + # Add the input to each of these and do None-safety checks + response = await self.httpx_client.request( + method=method, + url=urllib.parse.urljoin(f"{base_url}/", path), + headers=jsonable_encoder( + remove_none_from_dict( + { + **self.base_headers(), + **(headers if headers is not None else {}), + **( + request_options.get("additional_headers", {}) or {} + if request_options is not None + else {} + ), + } + ) + ), + params=encode_query( + jsonable_encoder( + remove_none_from_dict( + remove_omit_from_dict( + { + **(params if params is not None else {}), + **( + request_options.get( + "additional_query_parameters", {} + ) + or {} + if request_options is not None + else {} + ), + }, + omit, + ) + ) + ) + ), + json=json_body, + data=data_body, + content=content, + files=( + convert_file_dict_to_httpx_tuples( + remove_omit_from_dict(remove_none_from_dict(files), omit) + ) + if files is not 
None + else None + ), + timeout=timeout, + ) + + max_retries: int = ( + request_options.get("max_retries", 0) if request_options is not None else 0 + ) + if _should_retry(response=response): + if max_retries > retries: + await asyncio.sleep(_retry_timeout(response=response, retries=retries)) + return await self.request( + path=path, + method=method, + base_url=base_url, + params=params, + json=json, + content=content, + files=files, + headers=headers, + request_options=request_options, + retries=retries + 1, + omit=omit, + ) + return response + + @asynccontextmanager + async def stream( + self, + path: typing.Optional[str] = None, + *, + method: str, + base_url: typing.Optional[str] = None, + params: typing.Optional[typing.Dict[str, typing.Any]] = None, + json: typing.Optional[typing.Any] = None, + data: typing.Optional[typing.Any] = None, + content: typing.Optional[ + typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]] + ] = None, + files: typing.Optional[ + typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]] + ] = None, + headers: typing.Optional[typing.Dict[str, typing.Any]] = None, + request_options: typing.Optional[RequestOptions] = None, + retries: int = 0, + omit: typing.Optional[typing.Any] = None, + ) -> typing.AsyncIterator[httpx.Response]: + base_url = self.get_base_url(base_url) + timeout = ( + request_options.get("timeout_in_seconds") + if request_options is not None + and request_options.get("timeout_in_seconds") is not None + else self.base_timeout() + ) + + json_body, data_body = get_request_body( + json=json, data=data, request_options=request_options, omit=omit + ) + + async with self.httpx_client.stream( + method=method, + url=urllib.parse.urljoin(f"{base_url}/", path), + headers=jsonable_encoder( + remove_none_from_dict( + { + **self.base_headers(), + **(headers if headers is not None else {}), + **( + request_options.get("additional_headers", {}) + if request_options is not None + else {} + ), + } + ) + 
), + params=encode_query( + jsonable_encoder( + remove_none_from_dict( + remove_omit_from_dict( + { + **(params if params is not None else {}), + **( + request_options.get( + "additional_query_parameters", {} + ) + if request_options is not None + else {} + ), + }, + omit=omit, + ) + ) + ) + ), + json=json_body, + data=data_body, + content=content, + files=( + convert_file_dict_to_httpx_tuples( + remove_omit_from_dict(remove_none_from_dict(files), omit) + ) + if files is not None + else None + ), + timeout=timeout, + ) as stream: + yield stream diff --git a/services/completion-stateless-sdk/agenta/client/backend/core/jsonable_encoder.py b/services/completion-stateless-sdk/agenta/client/backend/core/jsonable_encoder.py new file mode 100644 index 0000000000..12a8b52fc2 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/core/jsonable_encoder.py @@ -0,0 +1,103 @@ +# This file was auto-generated by Fern from our API Definition. + +""" +jsonable_encoder converts a Python object to a JSON-friendly dict +(e.g. datetimes to strings, Pydantic models to dicts). 
+ +Taken from FastAPI, and made a bit simpler +https://github.com/tiangolo/fastapi/blob/master/fastapi/encoders.py +""" + +import base64 +import dataclasses +import datetime as dt +from enum import Enum +from pathlib import PurePath +from types import GeneratorType +from typing import Any, Callable, Dict, List, Optional, Set, Union + +import pydantic + +from .datetime_utils import serialize_datetime +from .pydantic_utilities import ( + IS_PYDANTIC_V2, + encode_by_type, + to_jsonable_with_fallback, +) + +SetIntStr = Set[Union[int, str]] +DictIntStrAny = Dict[Union[int, str], Any] + + +def jsonable_encoder( + obj: Any, custom_encoder: Optional[Dict[Any, Callable[[Any], Any]]] = None +) -> Any: + custom_encoder = custom_encoder or {} + if custom_encoder: + if type(obj) in custom_encoder: + return custom_encoder[type(obj)](obj) + else: + for encoder_type, encoder_instance in custom_encoder.items(): + if isinstance(obj, encoder_type): + return encoder_instance(obj) + if isinstance(obj, pydantic.BaseModel): + if IS_PYDANTIC_V2: + encoder = getattr(obj.model_config, "json_encoders", {}) # type: ignore # Pydantic v2 + else: + encoder = getattr(obj.__config__, "json_encoders", {}) # type: ignore # Pydantic v1 + if custom_encoder: + encoder.update(custom_encoder) + obj_dict = obj.dict(by_alias=True) + if "__root__" in obj_dict: + obj_dict = obj_dict["__root__"] + if "root" in obj_dict: + obj_dict = obj_dict["root"] + return jsonable_encoder(obj_dict, custom_encoder=encoder) + if dataclasses.is_dataclass(obj): + obj_dict = dataclasses.asdict(obj) # type: ignore + return jsonable_encoder(obj_dict, custom_encoder=custom_encoder) + if isinstance(obj, bytes): + return base64.b64encode(obj).decode("utf-8") + if isinstance(obj, Enum): + return obj.value + if isinstance(obj, PurePath): + return str(obj) + if isinstance(obj, (str, int, float, type(None))): + return obj + if isinstance(obj, dt.datetime): + return serialize_datetime(obj) + if isinstance(obj, dt.date): + return str(obj) 
+ if isinstance(obj, dict): + encoded_dict = {} + allowed_keys = set(obj.keys()) + for key, value in obj.items(): + if key in allowed_keys: + encoded_key = jsonable_encoder(key, custom_encoder=custom_encoder) + encoded_value = jsonable_encoder(value, custom_encoder=custom_encoder) + encoded_dict[encoded_key] = encoded_value + return encoded_dict + if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)): + encoded_list = [] + for item in obj: + encoded_list.append(jsonable_encoder(item, custom_encoder=custom_encoder)) + return encoded_list + + def fallback_serializer(o: Any) -> Any: + attempt_encode = encode_by_type(o) + if attempt_encode is not None: + return attempt_encode + + try: + data = dict(o) + except Exception as e: + errors: List[Exception] = [] + errors.append(e) + try: + data = vars(o) + except Exception as e: + errors.append(e) + raise ValueError(errors) from e + return jsonable_encoder(data, custom_encoder=custom_encoder) + + return to_jsonable_with_fallback(obj, fallback_serializer) diff --git a/services/completion-stateless-sdk/agenta/client/backend/core/pydantic_utilities.py b/services/completion-stateless-sdk/agenta/client/backend/core/pydantic_utilities.py new file mode 100644 index 0000000000..b4b9605ebc --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/core/pydantic_utilities.py @@ -0,0 +1,325 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +# nopycln: file +import datetime as dt +import typing +from collections import defaultdict + +import typing_extensions + +import pydantic + +from .datetime_utils import serialize_datetime +from .serialization import convert_and_respect_annotation_metadata + +IS_PYDANTIC_V2 = pydantic.VERSION.startswith("2.") + +if IS_PYDANTIC_V2: + # isort will try to reformat the comments on these imports, which breaks mypy + # isort: off + from pydantic.v1.datetime_parse import ( # type: ignore # pyright: ignore[reportMissingImports] # Pydantic v2 + parse_date as parse_date, + ) + from pydantic.v1.datetime_parse import ( # pyright: ignore[reportMissingImports] # Pydantic v2 + parse_datetime as parse_datetime, + ) + from pydantic.v1.json import ( # type: ignore # pyright: ignore[reportMissingImports] # Pydantic v2 + ENCODERS_BY_TYPE as encoders_by_type, + ) + from pydantic.v1.typing import ( # type: ignore # pyright: ignore[reportMissingImports] # Pydantic v2 + get_args as get_args, + ) + from pydantic.v1.typing import ( # pyright: ignore[reportMissingImports] # Pydantic v2 + get_origin as get_origin, + ) + from pydantic.v1.typing import ( # pyright: ignore[reportMissingImports] # Pydantic v2 + is_literal_type as is_literal_type, + ) + from pydantic.v1.typing import ( # pyright: ignore[reportMissingImports] # Pydantic v2 + is_union as is_union, + ) + from pydantic.v1.fields import ModelField as ModelField # type: ignore # pyright: ignore[reportMissingImports] # Pydantic v2 +else: + from pydantic.datetime_parse import parse_date as parse_date # type: ignore # Pydantic v1 + from pydantic.datetime_parse import parse_datetime as parse_datetime # type: ignore # Pydantic v1 + from pydantic.fields import ModelField as ModelField # type: ignore # Pydantic v1 + from pydantic.json import ENCODERS_BY_TYPE as encoders_by_type # type: ignore # Pydantic v1 + from pydantic.typing import get_args as get_args # type: ignore # Pydantic v1 + from pydantic.typing import get_origin as get_origin # 
type: ignore # Pydantic v1 + from pydantic.typing import is_literal_type as is_literal_type # type: ignore # Pydantic v1 + from pydantic.typing import is_union as is_union # type: ignore # Pydantic v1 + + # isort: on + + +T = typing.TypeVar("T") +Model = typing.TypeVar("Model", bound=pydantic.BaseModel) + + +def parse_obj_as(type_: typing.Type[T], object_: typing.Any) -> T: + dealiased_object = convert_and_respect_annotation_metadata( + object_=object_, annotation=type_, direction="read" + ) + if IS_PYDANTIC_V2: + adapter = pydantic.TypeAdapter(type_) # type: ignore # Pydantic v2 + return adapter.validate_python(dealiased_object) + else: + return pydantic.parse_obj_as(type_, dealiased_object) + + +def to_jsonable_with_fallback( + obj: typing.Any, fallback_serializer: typing.Callable[[typing.Any], typing.Any] +) -> typing.Any: + if IS_PYDANTIC_V2: + from pydantic_core import to_jsonable_python + + return to_jsonable_python(obj, fallback=fallback_serializer) + else: + return fallback_serializer(obj) + + +class UniversalBaseModel(pydantic.BaseModel): + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + # Allow fields begining with `model_` to be used in the model + protected_namespaces=(), + ) # type: ignore # Pydantic v2 + + @pydantic.model_serializer(mode="wrap", when_used="json") # type: ignore # Pydantic v2 + def serialize_model( + self, handler: pydantic.SerializerFunctionWrapHandler + ) -> typing.Any: # type: ignore # Pydantic v2 + serialized = handler(self) + data = { + k: serialize_datetime(v) if isinstance(v, dt.datetime) else v + for k, v in serialized.items() + } + return data + + else: + + class Config: + smart_union = True + json_encoders = {dt.datetime: serialize_datetime} + + @classmethod + def model_construct( + cls: typing.Type["Model"], + _fields_set: typing.Optional[typing.Set[str]] = None, + **values: typing.Any, + ) -> "Model": + dealiased_object = convert_and_respect_annotation_metadata( + 
object_=values, annotation=cls, direction="read" + ) + return cls.construct(_fields_set, **dealiased_object) + + @classmethod + def construct( + cls: typing.Type["Model"], + _fields_set: typing.Optional[typing.Set[str]] = None, + **values: typing.Any, + ) -> "Model": + dealiased_object = convert_and_respect_annotation_metadata( + object_=values, annotation=cls, direction="read" + ) + if IS_PYDANTIC_V2: + return super().model_construct(_fields_set, **dealiased_object) # type: ignore # Pydantic v2 + else: + return super().construct(_fields_set, **dealiased_object) + + def json(self, **kwargs: typing.Any) -> str: + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } + if IS_PYDANTIC_V2: + return super().model_dump_json(**kwargs_with_defaults) # type: ignore # Pydantic v2 + else: + return super().json(**kwargs_with_defaults) + + def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: + """ + Override the default dict method to `exclude_unset` by default. This function patches + `exclude_unset` to work include fields within non-None default values. + """ + # Note: the logic here is multi-plexed given the levers exposed in Pydantic V1 vs V2 + # Pydantic V1's .dict can be extremely slow, so we do not want to call it twice. + # + # We'd ideally do the same for Pydantic V2, but it shells out to a library to serialize models + # that we have less control over, and this is less intrusive than custom serializers for now. 
+ if IS_PYDANTIC_V2: + kwargs_with_defaults_exclude_unset: typing.Any = { + **kwargs, + "by_alias": True, + "exclude_unset": True, + "exclude_none": False, + } + kwargs_with_defaults_exclude_none: typing.Any = { + **kwargs, + "by_alias": True, + "exclude_none": True, + "exclude_unset": False, + } + dict_dump = deep_union_pydantic_dicts( + super().model_dump(**kwargs_with_defaults_exclude_unset), # type: ignore # Pydantic v2 + super().model_dump(**kwargs_with_defaults_exclude_none), # type: ignore # Pydantic v2 + ) + + else: + _fields_set = self.__fields_set__.copy() + + fields = _get_model_fields(self.__class__) + for name, field in fields.items(): + if name not in _fields_set: + default = _get_field_default(field) + + # If the default values are non-null act like they've been set + # This effectively allows exclude_unset to work like exclude_none where + # the latter passes through intentionally set none values. + if default is not None or ( + "exclude_unset" in kwargs and not kwargs["exclude_unset"] + ): + _fields_set.add(name) + + if default is not None: + self.__fields_set__.add(name) + + kwargs_with_defaults_exclude_unset_include_fields: typing.Any = { + "by_alias": True, + "exclude_unset": True, + "include": _fields_set, + **kwargs, + } + + dict_dump = super().dict( + **kwargs_with_defaults_exclude_unset_include_fields + ) + + return convert_and_respect_annotation_metadata( + object_=dict_dump, annotation=self.__class__, direction="write" + ) + + +def _union_list_of_pydantic_dicts( + source: typing.List[typing.Any], destination: typing.List[typing.Any] +) -> typing.List[typing.Any]: + converted_list: typing.List[typing.Any] = [] + for i, item in enumerate(source): + destination_value = destination[i] # type: ignore + if isinstance(item, dict): + converted_list.append(deep_union_pydantic_dicts(item, destination_value)) + elif isinstance(item, list): + converted_list.append( + _union_list_of_pydantic_dicts(item, destination_value) + ) + else: + 
converted_list.append(item) + return converted_list + + +def deep_union_pydantic_dicts( + source: typing.Dict[str, typing.Any], destination: typing.Dict[str, typing.Any] +) -> typing.Dict[str, typing.Any]: + for key, value in source.items(): + node = destination.setdefault(key, {}) + if isinstance(value, dict): + deep_union_pydantic_dicts(value, node) + # Note: we do not do this same processing for sets given we do not have sets of models + # and given the sets are unordered, the processing of the set and matching objects would + # be non-trivial. + elif isinstance(value, list): + destination[key] = _union_list_of_pydantic_dicts(value, node) + else: + destination[key] = value + + return destination + + +if IS_PYDANTIC_V2: + + class V2RootModel(UniversalBaseModel, pydantic.RootModel): # type: ignore # Pydantic v2 + pass + + UniversalRootModel: typing_extensions.TypeAlias = V2RootModel # type: ignore +else: + UniversalRootModel: typing_extensions.TypeAlias = UniversalBaseModel # type: ignore + + +def encode_by_type(o: typing.Any) -> typing.Any: + encoders_by_class_tuples: typing.Dict[ + typing.Callable[[typing.Any], typing.Any], typing.Tuple[typing.Any, ...] 
+ ] = defaultdict(tuple) + for type_, encoder in encoders_by_type.items(): + encoders_by_class_tuples[encoder] += (type_,) + + if type(o) in encoders_by_type: + return encoders_by_type[type(o)](o) + for encoder, classes_tuple in encoders_by_class_tuples.items(): + if isinstance(o, classes_tuple): + return encoder(o) + + +def update_forward_refs(model: typing.Type["Model"], **localns: typing.Any) -> None: + if IS_PYDANTIC_V2: + model.model_rebuild(raise_errors=False) # type: ignore # Pydantic v2 + else: + model.update_forward_refs(**localns) + + +# Mirrors Pydantic's internal typing +AnyCallable = typing.Callable[..., typing.Any] + + +def universal_root_validator( + pre: bool = False, +) -> typing.Callable[[AnyCallable], AnyCallable]: + def decorator(func: AnyCallable) -> AnyCallable: + if IS_PYDANTIC_V2: + return pydantic.model_validator(mode="before" if pre else "after")(func) # type: ignore # Pydantic v2 + else: + return pydantic.root_validator(pre=pre)(func) # type: ignore # Pydantic v1 + + return decorator + + +def universal_field_validator( + field_name: str, pre: bool = False +) -> typing.Callable[[AnyCallable], AnyCallable]: + def decorator(func: AnyCallable) -> AnyCallable: + if IS_PYDANTIC_V2: + return pydantic.field_validator( + field_name, mode="before" if pre else "after" + )( + func + ) # type: ignore # Pydantic v2 + else: + return pydantic.validator(field_name, pre=pre)(func) # type: ignore # Pydantic v1 + + return decorator + + +PydanticField = typing.Union[ModelField, pydantic.fields.FieldInfo] + + +def _get_model_fields( + model: typing.Type["Model"], +) -> typing.Mapping[str, PydanticField]: + if IS_PYDANTIC_V2: + return model.model_fields # type: ignore # Pydantic v2 + else: + return model.__fields__ # type: ignore # Pydantic v1 + + +def _get_field_default(field: PydanticField) -> typing.Any: + try: + value = field.get_default() # type: ignore # Pydantic < v1.10.15 + except: + value = field.default + if IS_PYDANTIC_V2: + from pydantic_core import 
PydanticUndefined + + if value == PydanticUndefined: + return None + return value + return value diff --git a/services/completion-stateless-sdk/agenta/client/backend/core/query_encoder.py b/services/completion-stateless-sdk/agenta/client/backend/core/query_encoder.py new file mode 100644 index 0000000000..03fbf59bd1 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/core/query_encoder.py @@ -0,0 +1,60 @@ +# This file was auto-generated by Fern from our API Definition. + +from typing import Any, Dict, List, Optional, Tuple + +import pydantic + + +# Flattens dicts to be of the form {"key[subkey][subkey2]": value} where value is not a dict +def traverse_query_dict( + dict_flat: Dict[str, Any], key_prefix: Optional[str] = None +) -> List[Tuple[str, Any]]: + result = [] + for k, v in dict_flat.items(): + key = f"{key_prefix}[{k}]" if key_prefix is not None else k + if isinstance(v, dict): + result.extend(traverse_query_dict(v, key)) + elif isinstance(v, list): + for arr_v in v: + if isinstance(arr_v, dict): + result.extend(traverse_query_dict(arr_v, key)) + else: + result.append((key, arr_v)) + else: + result.append((key, v)) + return result + + +def single_query_encoder(query_key: str, query_value: Any) -> List[Tuple[str, Any]]: + if isinstance(query_value, pydantic.BaseModel) or isinstance(query_value, dict): + if isinstance(query_value, pydantic.BaseModel): + obj_dict = query_value.dict(by_alias=True) + else: + obj_dict = query_value + return traverse_query_dict(obj_dict, query_key) + elif isinstance(query_value, list): + encoded_values: List[Tuple[str, Any]] = [] + for value in query_value: + if isinstance(value, pydantic.BaseModel) or isinstance(value, dict): + if isinstance(value, pydantic.BaseModel): + obj_dict = value.dict(by_alias=True) + elif isinstance(value, dict): + obj_dict = value + + encoded_values.extend(single_query_encoder(query_key, obj_dict)) + else: + encoded_values.append((query_key, value)) + + return encoded_values + + 
return [(query_key, query_value)] + + +def encode_query(query: Optional[Dict[str, Any]]) -> Optional[List[Tuple[str, Any]]]: + if query is None: + return None + + encoded_query = [] + for k, v in query.items(): + encoded_query.extend(single_query_encoder(k, v)) + return encoded_query diff --git a/services/completion-stateless-sdk/agenta/client/backend/core/remove_none_from_dict.py b/services/completion-stateless-sdk/agenta/client/backend/core/remove_none_from_dict.py new file mode 100644 index 0000000000..c2298143f1 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/core/remove_none_from_dict.py @@ -0,0 +1,11 @@ +# This file was auto-generated by Fern from our API Definition. + +from typing import Any, Dict, Mapping, Optional + + +def remove_none_from_dict(original: Mapping[str, Optional[Any]]) -> Dict[str, Any]: + new: Dict[str, Any] = {} + for key, value in original.items(): + if value is not None: + new[key] = value + return new diff --git a/services/completion-stateless-sdk/agenta/client/backend/core/request_options.py b/services/completion-stateless-sdk/agenta/client/backend/core/request_options.py new file mode 100644 index 0000000000..1b38804432 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/core/request_options.py @@ -0,0 +1,35 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +try: + from typing import NotRequired # type: ignore +except ImportError: + from typing_extensions import NotRequired + + +class RequestOptions(typing.TypedDict, total=False): + """ + Additional options for request-specific configuration when calling APIs via the SDK. + This is used primarily as an optional final parameter for service functions. + + Attributes: + - timeout_in_seconds: int. The number of seconds to await an API call before timing out. + + - max_retries: int. The max number of retries to attempt if the API call fails. + + - additional_headers: typing.Dict[str, typing.Any]. 
A dictionary containing additional parameters to spread into the request's header dict + + - additional_query_parameters: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's query parameters dict + + - additional_body_parameters: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's body parameters dict + + - chunk_size: int. The size, in bytes, to process each chunk of data being streamed back within the response. This equates to leveraging `chunk_size` within `requests` or `httpx`, and is only leveraged for file downloads. + """ + + timeout_in_seconds: NotRequired[int] + max_retries: NotRequired[int] + additional_headers: NotRequired[typing.Dict[str, typing.Any]] + additional_query_parameters: NotRequired[typing.Dict[str, typing.Any]] + additional_body_parameters: NotRequired[typing.Dict[str, typing.Any]] + chunk_size: NotRequired[int] diff --git a/services/completion-stateless-sdk/agenta/client/backend/core/serialization.py b/services/completion-stateless-sdk/agenta/client/backend/core/serialization.py new file mode 100644 index 0000000000..5679deb8a5 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/core/serialization.py @@ -0,0 +1,276 @@ +# This file was auto-generated by Fern from our API Definition. + +import collections +import inspect +import typing + +import typing_extensions + +import pydantic + + +class FieldMetadata: + """ + Metadata class used to annotate fields to provide additional information. 
+ + Example: + class MyDict(TypedDict): + field: typing.Annotated[str, FieldMetadata(alias="field_name")] + + Will serialize: `{"field": "value"}` + To: `{"field_name": "value"}` + """ + + alias: str + + def __init__(self, *, alias: str) -> None: + self.alias = alias + + +def convert_and_respect_annotation_metadata( + *, + object_: typing.Any, + annotation: typing.Any, + inner_type: typing.Optional[typing.Any] = None, + direction: typing.Literal["read", "write"], +) -> typing.Any: + """ + Respect the metadata annotations on a field, such as aliasing. This function effectively + manipulates the dict-form of an object to respect the metadata annotations. This is primarily used for + TypedDicts, which cannot support aliasing out of the box, and can be extended for additional + utilities, such as defaults. + + Parameters + ---------- + object_ : typing.Any + + annotation : type + The type we're looking to apply typing annotations from + + inner_type : typing.Optional[type] + + Returns + ------- + typing.Any + """ + + if object_ is None: + return None + if inner_type is None: + inner_type = annotation + + clean_type = _remove_annotations(inner_type) + # Pydantic models + if ( + inspect.isclass(clean_type) + and issubclass(clean_type, pydantic.BaseModel) + and isinstance(object_, typing.Mapping) + ): + return _convert_mapping(object_, clean_type, direction) + # TypedDicts + if typing_extensions.is_typeddict(clean_type) and isinstance( + object_, typing.Mapping + ): + return _convert_mapping(object_, clean_type, direction) + + if ( + typing_extensions.get_origin(clean_type) == typing.Dict + or typing_extensions.get_origin(clean_type) == dict + or clean_type == typing.Dict + ) and isinstance(object_, typing.Dict): + key_type = typing_extensions.get_args(clean_type)[0] + value_type = typing_extensions.get_args(clean_type)[1] + + return { + key: convert_and_respect_annotation_metadata( + object_=value, + annotation=annotation, + inner_type=value_type, + direction=direction, 
+ ) + for key, value in object_.items() + } + + # If you're iterating on a string, do not bother to coerce it to a sequence. + if not isinstance(object_, str): + if ( + typing_extensions.get_origin(clean_type) == typing.Set + or typing_extensions.get_origin(clean_type) == set + or clean_type == typing.Set + ) and isinstance(object_, typing.Set): + inner_type = typing_extensions.get_args(clean_type)[0] + return { + convert_and_respect_annotation_metadata( + object_=item, + annotation=annotation, + inner_type=inner_type, + direction=direction, + ) + for item in object_ + } + elif ( + ( + typing_extensions.get_origin(clean_type) == typing.List + or typing_extensions.get_origin(clean_type) == list + or clean_type == typing.List + ) + and isinstance(object_, typing.List) + ) or ( + ( + typing_extensions.get_origin(clean_type) == typing.Sequence + or typing_extensions.get_origin(clean_type) == collections.abc.Sequence + or clean_type == typing.Sequence + ) + and isinstance(object_, typing.Sequence) + ): + inner_type = typing_extensions.get_args(clean_type)[0] + return [ + convert_and_respect_annotation_metadata( + object_=item, + annotation=annotation, + inner_type=inner_type, + direction=direction, + ) + for item in object_ + ] + + if typing_extensions.get_origin(clean_type) == typing.Union: + # We should be able to ~relatively~ safely try to convert keys against all + # member types in the union, the edge case here is if one member aliases a field + # of the same name to a different name from another member + # Or if another member aliases a field of the same name that another member does not. 
+ for member in typing_extensions.get_args(clean_type): + object_ = convert_and_respect_annotation_metadata( + object_=object_, + annotation=annotation, + inner_type=member, + direction=direction, + ) + return object_ + + annotated_type = _get_annotation(annotation) + if annotated_type is None: + return object_ + + # If the object is not a TypedDict, a Union, or other container (list, set, sequence, etc.) + # Then we can safely call it on the recursive conversion. + return object_ + + +def _convert_mapping( + object_: typing.Mapping[str, object], + expected_type: typing.Any, + direction: typing.Literal["read", "write"], +) -> typing.Mapping[str, object]: + converted_object: typing.Dict[str, object] = {} + annotations = typing_extensions.get_type_hints(expected_type, include_extras=True) + aliases_to_field_names = _get_alias_to_field_name(annotations) + for key, value in object_.items(): + if direction == "read" and key in aliases_to_field_names: + dealiased_key = aliases_to_field_names.get(key) + if dealiased_key is not None: + type_ = annotations.get(dealiased_key) + else: + type_ = annotations.get(key) + # Note you can't get the annotation by the field name if you're in read mode, so you must check the aliases map + # + # So this is effectively saying if we're in write mode, and we don't have a type, or if we're in read mode and we don't have an alias + # then we can just pass the value through as is + if type_ is None: + converted_object[key] = value + elif direction == "read" and key not in aliases_to_field_names: + converted_object[key] = convert_and_respect_annotation_metadata( + object_=value, annotation=type_, direction=direction + ) + else: + converted_object[ + _alias_key(key, type_, direction, aliases_to_field_names) + ] = convert_and_respect_annotation_metadata( + object_=value, annotation=type_, direction=direction + ) + return converted_object + + +def _get_annotation(type_: typing.Any) -> typing.Optional[typing.Any]: + maybe_annotated_type = 
typing_extensions.get_origin(type_) + if maybe_annotated_type is None: + return None + + if maybe_annotated_type == typing_extensions.NotRequired: + type_ = typing_extensions.get_args(type_)[0] + maybe_annotated_type = typing_extensions.get_origin(type_) + + if maybe_annotated_type == typing_extensions.Annotated: + return type_ + + return None + + +def _remove_annotations(type_: typing.Any) -> typing.Any: + maybe_annotated_type = typing_extensions.get_origin(type_) + if maybe_annotated_type is None: + return type_ + + if maybe_annotated_type == typing_extensions.NotRequired: + return _remove_annotations(typing_extensions.get_args(type_)[0]) + + if maybe_annotated_type == typing_extensions.Annotated: + return _remove_annotations(typing_extensions.get_args(type_)[0]) + + return type_ + + +def get_alias_to_field_mapping(type_: typing.Any) -> typing.Dict[str, str]: + annotations = typing_extensions.get_type_hints(type_, include_extras=True) + return _get_alias_to_field_name(annotations) + + +def get_field_to_alias_mapping(type_: typing.Any) -> typing.Dict[str, str]: + annotations = typing_extensions.get_type_hints(type_, include_extras=True) + return _get_field_to_alias_name(annotations) + + +def _get_alias_to_field_name( + field_to_hint: typing.Dict[str, typing.Any], +) -> typing.Dict[str, str]: + aliases = {} + for field, hint in field_to_hint.items(): + maybe_alias = _get_alias_from_type(hint) + if maybe_alias is not None: + aliases[maybe_alias] = field + return aliases + + +def _get_field_to_alias_name( + field_to_hint: typing.Dict[str, typing.Any], +) -> typing.Dict[str, str]: + aliases = {} + for field, hint in field_to_hint.items(): + maybe_alias = _get_alias_from_type(hint) + if maybe_alias is not None: + aliases[field] = maybe_alias + return aliases + + +def _get_alias_from_type(type_: typing.Any) -> typing.Optional[str]: + maybe_annotated_type = _get_annotation(type_) + + if maybe_annotated_type is not None: + # The actual annotations are 1 onward, the first 
is the annotated type + annotations = typing_extensions.get_args(maybe_annotated_type)[1:] + + for annotation in annotations: + if isinstance(annotation, FieldMetadata) and annotation.alias is not None: + return annotation.alias + return None + + +def _alias_key( + key: str, + type_: typing.Any, + direction: typing.Literal["read", "write"], + aliases_to_field_names: typing.Dict[str, str], +) -> str: + if direction == "read": + return aliases_to_field_names.get(key, key) + return _get_alias_from_type(type_=type_) or key diff --git a/services/completion-stateless-sdk/agenta/client/backend/environments/__init__.py b/services/completion-stateless-sdk/agenta/client/backend/environments/__init__.py new file mode 100644 index 0000000000..67a41b2742 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/environments/__init__.py @@ -0,0 +1 @@ +# This file was auto-generated by Fern from our API Definition. diff --git a/services/completion-stateless-sdk/agenta/client/backend/environments/client.py b/services/completion-stateless-sdk/agenta/client/backend/environments/client.py new file mode 100644 index 0000000000..1cfbb07010 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/environments/client.py @@ -0,0 +1,190 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ..core.client_wrapper import SyncClientWrapper +from ..core.request_options import RequestOptions +from ..core.pydantic_utilities import parse_obj_as +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) 
# ---------------------------------------------------------------------------
# environments/client.py — generated by Fern; hand-restyled, behavior-identical.
# The response-decoding boilerplate the generator repeats per endpoint is
# factored into _parse_response(); request construction, endpoint paths and
# raised exception types are unchanged.
# (Imports and OMIT come from the module header immediately above this block.)
# ---------------------------------------------------------------------------


def _parse_response(response, type_: typing.Any) -> typing.Any:
    """Decode an httpx response exactly as the generated inline handlers do.

    Returns the parsed body for 2xx, raises UnprocessableEntityError for 422,
    and wraps any other status — or a body that fails to decode as JSON —
    in ApiError.
    """
    try:
        if 200 <= response.status_code < 300:
            return parse_obj_as(type_=type_, object_=response.json())
        if response.status_code == 422:
            raise UnprocessableEntityError(
                typing.cast(
                    HttpValidationError,
                    parse_obj_as(
                        type_=HttpValidationError,  # type: ignore
                        object_=response.json(),
                    ),
                )
            )
        error_body = response.json()
    except JSONDecodeError:
        raise ApiError(status_code=response.status_code, body=response.text)
    raise ApiError(status_code=response.status_code, body=error_body)


class EnvironmentsClient:
    """Synchronous client for the `environments/` endpoints."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper

    def deploy_to_environment(
        self,
        *,
        environment_name: str,
        variant_id: str,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[typing.Any]:
        """Deploy variant *variant_id* to environment *environment_name*.

        Parameters
        ----------
        environment_name : str
            Name of the environment to deploy to.
        variant_id : str
            Variant id to deploy.
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            Successful Response

        Raises
        ------
        UnprocessableEntityError
            When the backend rejects the payload (HTTP 422).
        ApiError
            For any other non-2xx response.
        """
        response = self._client_wrapper.httpx_client.request(
            "environments/deploy",
            method="POST",
            json={
                "environment_name": environment_name,
                "variant_id": variant_id,
            },
            request_options=request_options,
            omit=OMIT,
        )
        return typing.cast(
            typing.Optional[typing.Any],
            _parse_response(response, typing.Optional[typing.Any]),
        )


class AsyncEnvironmentsClient:
    """Asynchronous twin of EnvironmentsClient — same endpoint, awaited I/O."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper

    async def deploy_to_environment(
        self,
        *,
        environment_name: str,
        variant_id: str,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[typing.Any]:
        """Async counterpart of EnvironmentsClient.deploy_to_environment.

        Same parameters, return value and raised exceptions as the sync
        method; the HTTP request is awaited.
        """
        response = await self._client_wrapper.httpx_client.request(
            "environments/deploy",
            method="POST",
            json={
                "environment_name": environment_name,
                "variant_id": variant_id,
            },
            request_options=request_options,
            omit=OMIT,
        )
        return typing.cast(
            typing.Optional[typing.Any],
            _parse_response(response, typing.Optional[typing.Any]),
        )


# ---------------------------------------------------------------------------
# errors/unprocessable_entity_error.py — generated stub, reproduced unchanged.
# (In the real tree this lives in its own module and is imported above.)
# ---------------------------------------------------------------------------
class UnprocessableEntityError(ApiError):  # noqa: F811 — separate generated file
    def __init__(self, body: HttpValidationError):
        super().__init__(status_code=422, body=body)
# This file was auto-generated by Fern from our API Definition.
# NOTE(review): hand-fixed — the generator emitted two methods named
# `fetch_evaluation_scenarios` on each client: the per-evaluation variant
# (positional `evaluation_id`) was silently shadowed by the later
# comparison-results variant (keyword-only `evaluations_ids`) and was
# therefore unreachable. The shadowed method is renamed
# `fetch_evaluation_scenarios_by_evaluation_id`; the surviving name keeps its
# current (comparison-results) behavior, so existing callers are unaffected.
# The 9x-repeated response-handling boilerplate is factored into
# `_handle_response`; endpoints, payloads and raised exceptions are unchanged.

import typing
from json.decoder import JSONDecodeError

from ..core.client_wrapper import SyncClientWrapper, AsyncClientWrapper
from ..core.request_options import RequestOptions
from ..core.pydantic_utilities import parse_obj_as
from ..errors.unprocessable_entity_error import UnprocessableEntityError
from ..types.http_validation_error import HttpValidationError
from ..core.api_error import ApiError
from ..types.evaluation import Evaluation
from ..types.llm_run_rate_limit import LlmRunRateLimit
from ..core.serialization import convert_and_respect_annotation_metadata
from ..core.jsonable_encoder import jsonable_encoder
from ..types.evaluation_scenario import EvaluationScenario

# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)


def _handle_response(response, type_: typing.Any) -> typing.Any:
    """Decode an httpx response: return the parsed 2xx body, raise
    UnprocessableEntityError on 422, and wrap any other status (or an
    undecodable body) in ApiError — identical to the generated inline flow.
    """
    try:
        if 200 <= response.status_code < 300:
            return parse_obj_as(type_=type_, object_=response.json())
        if response.status_code == 422:
            raise UnprocessableEntityError(
                typing.cast(
                    HttpValidationError,
                    parse_obj_as(
                        type_=HttpValidationError,  # type: ignore
                        object_=response.json(),
                    ),
                )
            )
        error_body = response.json()
    except JSONDecodeError:
        raise ApiError(status_code=response.status_code, body=response.text)
    raise ApiError(status_code=response.status_code, body=error_body)


class EvaluationsClient:
    """Synchronous client for the `evaluations/` endpoints."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper

    def fetch_evaluation_ids(
        self,
        *,
        resource_type: str,
        resource_ids: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.List[str]:
        """Fetch evaluation ids for a given resource type and optional ids.

        Raises UnprocessableEntityError (422) or ApiError on failure.
        """
        response = self._client_wrapper.httpx_client.request(
            "evaluations/by_resource",
            method="GET",
            params={
                "resource_type": resource_type,
                "resource_ids": resource_ids,
            },
            request_options=request_options,
        )
        return typing.cast(
            typing.List[str], _handle_response(response, typing.List[str])
        )

    def fetch_list_evaluations(
        self, *, app_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> typing.List[Evaluation]:
        """List the evaluations belonging to *app_id*."""
        response = self._client_wrapper.httpx_client.request(
            "evaluations",
            method="GET",
            params={"app_id": app_id},
            request_options=request_options,
        )
        return typing.cast(
            typing.List[Evaluation],
            _handle_response(response, typing.List[Evaluation]),
        )

    def create_evaluation(
        self,
        *,
        app_id: str,
        variant_ids: typing.Sequence[str],
        evaluators_configs: typing.Sequence[str],
        testset_id: str,
        rate_limit: LlmRunRateLimit,
        lm_providers_keys: typing.Optional[
            typing.Dict[str, typing.Optional[str]]
        ] = OMIT,
        correct_answer_column: typing.Optional[str] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.List[Evaluation]:
        """Create a new evaluation (comparison table) and return the created
        evaluations.
        """
        response = self._client_wrapper.httpx_client.request(
            "evaluations",
            method="POST",
            json={
                "app_id": app_id,
                "variant_ids": variant_ids,
                "evaluators_configs": evaluators_configs,
                "testset_id": testset_id,
                # rate_limit is a model type; serialize it respecting aliases.
                "rate_limit": convert_and_respect_annotation_metadata(
                    object_=rate_limit, annotation=LlmRunRateLimit, direction="write"
                ),
                "lm_providers_keys": lm_providers_keys,
                "correct_answer_column": correct_answer_column,
            },
            request_options=request_options,
            omit=OMIT,
        )
        return typing.cast(
            typing.List[Evaluation],
            _handle_response(response, typing.List[Evaluation]),
        )

    def delete_evaluations(
        self,
        *,
        evaluations_ids: typing.Sequence[str],
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.List[str]:
        """Delete the given evaluations; returns the deleted ids."""
        response = self._client_wrapper.httpx_client.request(
            "evaluations",
            method="DELETE",
            json={"evaluations_ids": evaluations_ids},
            request_options=request_options,
            omit=OMIT,
        )
        return typing.cast(
            typing.List[str], _handle_response(response, typing.List[str])
        )

    def fetch_evaluation_status(
        self,
        evaluation_id: str,
        *,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[typing.Any]:
        """Fetch the status of evaluation *evaluation_id*."""
        response = self._client_wrapper.httpx_client.request(
            f"evaluations/{jsonable_encoder(evaluation_id)}/status",
            method="GET",
            request_options=request_options,
        )
        return typing.cast(
            typing.Optional[typing.Any],
            _handle_response(response, typing.Optional[typing.Any]),
        )

    def fetch_evaluation_results(
        self,
        evaluation_id: str,
        *,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[typing.Any]:
        """Fetch the results of evaluation *evaluation_id*."""
        response = self._client_wrapper.httpx_client.request(
            f"evaluations/{jsonable_encoder(evaluation_id)}/results",
            method="GET",
            request_options=request_options,
        )
        return typing.cast(
            typing.Optional[typing.Any],
            _handle_response(response, typing.Optional[typing.Any]),
        )

    def fetch_evaluation_scenarios_by_evaluation_id(
        self,
        evaluation_id: str,
        *,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.List[EvaluationScenario]:
        """Fetch the scenarios of a single evaluation.

        Renamed from `fetch_evaluation_scenarios`: the generator reused that
        name for the comparison-results endpoint below, whose later definition
        shadowed this one and made this endpoint unreachable.
        """
        response = self._client_wrapper.httpx_client.request(
            f"evaluations/{jsonable_encoder(evaluation_id)}/evaluation_scenarios",
            method="GET",
            request_options=request_options,
        )
        return typing.cast(
            typing.List[EvaluationScenario],
            _handle_response(response, typing.List[EvaluationScenario]),
        )

    def fetch_evaluation(
        self,
        evaluation_id: str,
        *,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> Evaluation:
        """Fetch a single evaluation by its id."""
        response = self._client_wrapper.httpx_client.request(
            f"evaluations/{jsonable_encoder(evaluation_id)}",
            method="GET",
            request_options=request_options,
        )
        return typing.cast(Evaluation, _handle_response(response, Evaluation))

    def fetch_evaluation_scenarios(
        self,
        *,
        evaluations_ids: str,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[typing.Any]:
        """Fetch comparison results across several evaluations.

        Keeps this name because it was the definition that effectively won the
        name collision, so existing callers continue to work.
        """
        response = self._client_wrapper.httpx_client.request(
            "evaluations/evaluation_scenarios/comparison-results",
            method="GET",
            params={"evaluations_ids": evaluations_ids},
            request_options=request_options,
        )
        return typing.cast(
            typing.Optional[typing.Any],
            _handle_response(response, typing.Optional[typing.Any]),
        )


class AsyncEvaluationsClient:
    """Asynchronous twin of EvaluationsClient — same endpoints, awaited I/O."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper

    async def fetch_evaluation_ids(
        self,
        *,
        resource_type: str,
        resource_ids: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.List[str]:
        """Async counterpart of EvaluationsClient.fetch_evaluation_ids."""
        response = await self._client_wrapper.httpx_client.request(
            "evaluations/by_resource",
            method="GET",
            params={
                "resource_type": resource_type,
                "resource_ids": resource_ids,
            },
            request_options=request_options,
        )
        return typing.cast(
            typing.List[str], _handle_response(response, typing.List[str])
        )

    async def fetch_list_evaluations(
        self, *, app_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> typing.List[Evaluation]:
        """Async counterpart of EvaluationsClient.fetch_list_evaluations."""
        response = await self._client_wrapper.httpx_client.request(
            "evaluations",
            method="GET",
            params={"app_id": app_id},
            request_options=request_options,
        )
        return typing.cast(
            typing.List[Evaluation],
            _handle_response(response, typing.List[Evaluation]),
        )

    async def create_evaluation(
        self,
        *,
        app_id: str,
        variant_ids: typing.Sequence[str],
        evaluators_configs: typing.Sequence[str],
        testset_id: str,
        rate_limit: LlmRunRateLimit,
        lm_providers_keys: typing.Optional[
            typing.Dict[str, typing.Optional[str]]
        ] = OMIT,
        correct_answer_column: typing.Optional[str] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.List[Evaluation]:
        """Async counterpart of EvaluationsClient.create_evaluation."""
        response = await self._client_wrapper.httpx_client.request(
            "evaluations",
            method="POST",
            json={
                "app_id": app_id,
                "variant_ids": variant_ids,
                "evaluators_configs": evaluators_configs,
                "testset_id": testset_id,
                "rate_limit": convert_and_respect_annotation_metadata(
                    object_=rate_limit, annotation=LlmRunRateLimit, direction="write"
                ),
                "lm_providers_keys": lm_providers_keys,
                "correct_answer_column": correct_answer_column,
            },
            request_options=request_options,
            omit=OMIT,
        )
        return typing.cast(
            typing.List[Evaluation],
            _handle_response(response, typing.List[Evaluation]),
        )

    async def delete_evaluations(
        self,
        *,
        evaluations_ids: typing.Sequence[str],
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.List[str]:
        """Async counterpart of EvaluationsClient.delete_evaluations."""
        response = await self._client_wrapper.httpx_client.request(
            "evaluations",
            method="DELETE",
            json={"evaluations_ids": evaluations_ids},
            request_options=request_options,
            omit=OMIT,
        )
        return typing.cast(
            typing.List[str], _handle_response(response, typing.List[str])
        )

    async def fetch_evaluation_status(
        self,
        evaluation_id: str,
        *,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[typing.Any]:
        """Async counterpart of EvaluationsClient.fetch_evaluation_status."""
        response = await self._client_wrapper.httpx_client.request(
            f"evaluations/{jsonable_encoder(evaluation_id)}/status",
            method="GET",
            request_options=request_options,
        )
        return typing.cast(
            typing.Optional[typing.Any],
            _handle_response(response, typing.Optional[typing.Any]),
        )

    async def fetch_evaluation_results(
        self,
        evaluation_id: str,
        *,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[typing.Any]:
        """Async counterpart of EvaluationsClient.fetch_evaluation_results."""
        response = await self._client_wrapper.httpx_client.request(
            f"evaluations/{jsonable_encoder(evaluation_id)}/results",
            method="GET",
            request_options=request_options,
        )
        return typing.cast(
            typing.Optional[typing.Any],
            _handle_response(response, typing.Optional[typing.Any]),
        )

    async def fetch_evaluation_scenarios_by_evaluation_id(
        self,
        evaluation_id: str,
        *,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.List[EvaluationScenario]:
        """Fetch the scenarios of a single evaluation (async).

        Renamed for the same shadowing reason as the sync client — see
        EvaluationsClient.fetch_evaluation_scenarios_by_evaluation_id.
        """
        response = await self._client_wrapper.httpx_client.request(
            f"evaluations/{jsonable_encoder(evaluation_id)}/evaluation_scenarios",
            method="GET",
            request_options=request_options,
        )
        return typing.cast(
            typing.List[EvaluationScenario],
            _handle_response(response, typing.List[EvaluationScenario]),
        )

    async def fetch_evaluation(
        self,
        evaluation_id: str,
        *,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> Evaluation:
        """Async counterpart of EvaluationsClient.fetch_evaluation."""
        response = await self._client_wrapper.httpx_client.request(
            f"evaluations/{jsonable_encoder(evaluation_id)}",
            method="GET",
            request_options=request_options,
        )
        return typing.cast(Evaluation, _handle_response(response, Evaluation))

    async def fetch_evaluation_scenarios(
        self,
        *,
        evaluations_ids: str,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[typing.Any]:
        """Fetch comparison results across several evaluations (async).

        NOTE(review): the original body of this method extends past the end of
        the visible chunk; it is reconstructed as the exact async mirror of the
        sync method — confirm against the full generated file.
        """
        response = await self._client_wrapper.httpx_client.request(
            "evaluations/evaluation_scenarios/comparison-results",
            method="GET",
            params={"evaluations_ids": evaluations_ids},
            request_options=request_options,
        )
        return typing.cast(
            typing.Optional[typing.Any],
            _handle_response(response, typing.Optional[typing.Any]),
        )
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluations.fetch_evaluation_scenarios( + evaluations_ids="evaluations_ids", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "evaluations/evaluation_scenarios/comparison-results", + method="GET", + params={ + "evaluations_ids": evaluations_ids, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/services/completion-stateless-sdk/agenta/client/backend/evaluators/__init__.py b/services/completion-stateless-sdk/agenta/client/backend/evaluators/__init__.py new file mode 100644 index 0000000000..67a41b2742 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/evaluators/__init__.py @@ -0,0 +1 @@ +# This file was auto-generated by Fern from our API Definition. 
diff --git a/services/completion-stateless-sdk/agenta/client/backend/evaluators/client.py b/services/completion-stateless-sdk/agenta/client/backend/evaluators/client.py new file mode 100644 index 0000000000..29bafb0305 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/evaluators/client.py @@ -0,0 +1,1259 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ..core.client_wrapper import SyncClientWrapper +from ..core.request_options import RequestOptions +from ..types.evaluator import Evaluator +from ..core.pydantic_utilities import parse_obj_as +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..types.evaluator_mapping_output_interface import EvaluatorMappingOutputInterface +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from ..types.evaluator_output_interface import EvaluatorOutputInterface +from ..core.jsonable_encoder import jsonable_encoder +from ..types.evaluator_config import EvaluatorConfig +from ..core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class EvaluatorsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def get_evaluators_endpoint( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[Evaluator]: + """ + Endpoint to fetch a list of evaluators. + + Returns: + List[Evaluator]: A list of evaluator objects. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[Evaluator] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.evaluators.get_evaluators_endpoint() + """ + _response = self._client_wrapper.httpx_client.request( + "evaluators", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[Evaluator], + parse_obj_as( + type_=typing.List[Evaluator], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def evaluator_data_map( + self, + *, + inputs: typing.Dict[str, typing.Optional[typing.Any]], + mapping: typing.Dict[str, typing.Optional[typing.Any]], + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorMappingOutputInterface: + """ + Endpoint to map the experiment data tree to evaluator interface. + + Args: + request (Request): The request object. + payload (EvaluatorMappingInputInterface): The payload containing the request data. + + Returns: + EvaluatorMappingOutputInterface: the evaluator mapping output object + + Parameters + ---------- + inputs : typing.Dict[str, typing.Optional[typing.Any]] + + mapping : typing.Dict[str, typing.Optional[typing.Any]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + EvaluatorMappingOutputInterface + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.evaluators.evaluator_data_map( + inputs={"key": "value"}, + mapping={"key": "value"}, + ) + """ + _response = self._client_wrapper.httpx_client.request( + "evaluators/map", + method="POST", + json={ + "inputs": inputs, + "mapping": mapping, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EvaluatorMappingOutputInterface, + parse_obj_as( + type_=EvaluatorMappingOutputInterface, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def evaluator_run( + self, + evaluator_key: str, + *, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + settings: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + credentials: typing.Optional[ + typing.Dict[str, typing.Optional[typing.Any]] + ] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorOutputInterface: + """ + Endpoint to evaluate LLM app run + + Args: + request (Request): The request object. + evaluator_key (str): The key of the evaluator. + payload (EvaluatorInputInterface): The payload containing the request data. + + Returns: + result: EvaluatorOutputInterface object containing the outputs. 
+ + Parameters + ---------- + evaluator_key : str + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + + settings : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + + credentials : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluatorOutputInterface + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.evaluators.evaluator_run( + evaluator_key="evaluator_key", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(evaluator_key)}/run", + method="POST", + json={ + "inputs": inputs, + "settings": settings, + "credentials": credentials, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EvaluatorOutputInterface, + parse_obj_as( + type_=EvaluatorOutputInterface, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_evaluator_configs( + self, *, app_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[EvaluatorConfig]: + """ + Endpoint to fetch evaluator configurations for a specific app. + + Args: + app_id (str): The ID of the app. + + Returns: + List[EvaluatorConfigDB]: A list of evaluator configuration objects. 
+ + Parameters + ---------- + app_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[EvaluatorConfig] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.evaluators.get_evaluator_configs( + app_id="app_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "evaluators/configs", + method="GET", + params={ + "app_id": app_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[EvaluatorConfig], + parse_obj_as( + type_=typing.List[EvaluatorConfig], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create_new_evaluator_config( + self, + *, + app_id: str, + name: str, + evaluator_key: str, + settings_values: typing.Dict[str, typing.Optional[typing.Any]], + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorConfig: + """ + Endpoint to fetch evaluator configurations for a specific app. + + Args: + app_id (str): The ID of the app. + + Returns: + EvaluatorConfigDB: Evaluator configuration api model. + + Parameters + ---------- + app_id : str + + name : str + + evaluator_key : str + + settings_values : typing.Dict[str, typing.Optional[typing.Any]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + EvaluatorConfig + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.evaluators.create_new_evaluator_config( + app_id="app_id", + name="name", + evaluator_key="evaluator_key", + settings_values={"key": "value"}, + ) + """ + _response = self._client_wrapper.httpx_client.request( + "evaluators/configs", + method="POST", + json={ + "app_id": app_id, + "name": name, + "evaluator_key": evaluator_key, + "settings_values": settings_values, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EvaluatorConfig, + parse_obj_as( + type_=EvaluatorConfig, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_evaluator_config( + self, + evaluator_config_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorConfig: + """ + Endpoint to fetch evaluator configurations for a specific app. + + Returns: + List[EvaluatorConfigDB]: A list of evaluator configuration objects. + + Parameters + ---------- + evaluator_config_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + EvaluatorConfig + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.evaluators.get_evaluator_config( + evaluator_config_id="evaluator_config_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/configs/{jsonable_encoder(evaluator_config_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EvaluatorConfig, + parse_obj_as( + type_=EvaluatorConfig, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_evaluator_config( + self, + evaluator_config_id: str, + *, + name: typing.Optional[str] = OMIT, + evaluator_key: typing.Optional[str] = OMIT, + settings_values: typing.Optional[ + typing.Dict[str, typing.Optional[typing.Any]] + ] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorConfig: + """ + Endpoint to update evaluator configurations for a specific app. + + Returns: + List[EvaluatorConfigDB]: A list of evaluator configuration objects. + + Parameters + ---------- + evaluator_config_id : str + + name : typing.Optional[str] + + evaluator_key : typing.Optional[str] + + settings_values : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + EvaluatorConfig + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.evaluators.update_evaluator_config( + evaluator_config_id="evaluator_config_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/configs/{jsonable_encoder(evaluator_config_id)}", + method="PUT", + json={ + "name": name, + "evaluator_key": evaluator_key, + "settings_values": settings_values, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EvaluatorConfig, + parse_obj_as( + type_=EvaluatorConfig, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_evaluator_config( + self, + evaluator_config_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> bool: + """ + Endpoint to delete a specific evaluator configuration. + + Args: + evaluator_config_id (str): The unique identifier of the evaluator configuration. + + Returns: + bool: True if deletion was successful, False otherwise. + + Parameters + ---------- + evaluator_config_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + bool + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.evaluators.delete_evaluator_config( + evaluator_config_id="evaluator_config_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/configs/{jsonable_encoder(evaluator_config_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + bool, + parse_obj_as( + type_=bool, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncEvaluatorsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def get_evaluators_endpoint( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[Evaluator]: + """ + Endpoint to fetch a list of evaluators. + + Returns: + List[Evaluator]: A list of evaluator objects. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[Evaluator] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluators.get_evaluators_endpoint() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "evaluators", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[Evaluator], + parse_obj_as( + type_=typing.List[Evaluator], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def evaluator_data_map( + self, + *, + inputs: typing.Dict[str, typing.Optional[typing.Any]], + mapping: typing.Dict[str, typing.Optional[typing.Any]], + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorMappingOutputInterface: + """ + Endpoint to map the experiment data tree to evaluator interface. + + Args: + request (Request): The request object. + payload (EvaluatorMappingInputInterface): The payload containing the request data. + + Returns: + EvaluatorMappingOutputInterface: the evaluator mapping output object + + Parameters + ---------- + inputs : typing.Dict[str, typing.Optional[typing.Any]] + + mapping : typing.Dict[str, typing.Optional[typing.Any]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + EvaluatorMappingOutputInterface + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluators.evaluator_data_map( + inputs={"key": "value"}, + mapping={"key": "value"}, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "evaluators/map", + method="POST", + json={ + "inputs": inputs, + "mapping": mapping, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EvaluatorMappingOutputInterface, + parse_obj_as( + type_=EvaluatorMappingOutputInterface, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def evaluator_run( + self, + evaluator_key: str, + *, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + settings: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + credentials: typing.Optional[ + typing.Dict[str, typing.Optional[typing.Any]] + ] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorOutputInterface: + """ + Endpoint to evaluate LLM app run + + Args: + request (Request): The request object. + evaluator_key (str): The key of the evaluator. + payload (EvaluatorInputInterface): The payload containing the request data. + + Returns: + result: EvaluatorOutputInterface object containing the outputs. 
+ + Parameters + ---------- + evaluator_key : str + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + + settings : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + + credentials : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluatorOutputInterface + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluators.evaluator_run( + evaluator_key="evaluator_key", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(evaluator_key)}/run", + method="POST", + json={ + "inputs": inputs, + "settings": settings, + "credentials": credentials, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EvaluatorOutputInterface, + parse_obj_as( + type_=EvaluatorOutputInterface, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_evaluator_configs( + self, *, app_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[EvaluatorConfig]: + """ + Endpoint to fetch evaluator configurations for a specific app. + + Args: + app_id (str): The ID of the app. 
+ + Returns: + List[EvaluatorConfigDB]: A list of evaluator configuration objects. + + Parameters + ---------- + app_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[EvaluatorConfig] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluators.get_evaluator_configs( + app_id="app_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "evaluators/configs", + method="GET", + params={ + "app_id": app_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[EvaluatorConfig], + parse_obj_as( + type_=typing.List[EvaluatorConfig], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def create_new_evaluator_config( + self, + *, + app_id: str, + name: str, + evaluator_key: str, + settings_values: typing.Dict[str, typing.Optional[typing.Any]], + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorConfig: + """ + Endpoint to fetch evaluator configurations for a specific app. + + Args: + app_id (str): The ID of the app. + + Returns: + EvaluatorConfigDB: Evaluator configuration api model. 
+ + Parameters + ---------- + app_id : str + + name : str + + evaluator_key : str + + settings_values : typing.Dict[str, typing.Optional[typing.Any]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluatorConfig + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluators.create_new_evaluator_config( + app_id="app_id", + name="name", + evaluator_key="evaluator_key", + settings_values={"key": "value"}, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "evaluators/configs", + method="POST", + json={ + "app_id": app_id, + "name": name, + "evaluator_key": evaluator_key, + "settings_values": settings_values, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EvaluatorConfig, + parse_obj_as( + type_=EvaluatorConfig, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_evaluator_config( + self, + evaluator_config_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorConfig: + """ + Endpoint to fetch evaluator configurations for a specific app. + + Returns: + List[EvaluatorConfigDB]: A list of evaluator configuration objects. 
+ + Parameters + ---------- + evaluator_config_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluatorConfig + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluators.get_evaluator_config( + evaluator_config_id="evaluator_config_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/configs/{jsonable_encoder(evaluator_config_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EvaluatorConfig, + parse_obj_as( + type_=EvaluatorConfig, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_evaluator_config( + self, + evaluator_config_id: str, + *, + name: typing.Optional[str] = OMIT, + evaluator_key: typing.Optional[str] = OMIT, + settings_values: typing.Optional[ + typing.Dict[str, typing.Optional[typing.Any]] + ] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorConfig: + """ + Endpoint to update evaluator configurations for a specific app. + + Returns: + List[EvaluatorConfigDB]: A list of evaluator configuration objects. 
+ + Parameters + ---------- + evaluator_config_id : str + + name : typing.Optional[str] + + evaluator_key : typing.Optional[str] + + settings_values : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluatorConfig + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluators.update_evaluator_config( + evaluator_config_id="evaluator_config_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/configs/{jsonable_encoder(evaluator_config_id)}", + method="PUT", + json={ + "name": name, + "evaluator_key": evaluator_key, + "settings_values": settings_values, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EvaluatorConfig, + parse_obj_as( + type_=EvaluatorConfig, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_evaluator_config( + self, + evaluator_config_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> bool: + """ + Endpoint to delete a specific evaluator configuration. + + Args: + evaluator_config_id (str): The unique identifier of the evaluator configuration. + + Returns: + bool: True if deletion was successful, False otherwise. 
+ + Parameters + ---------- + evaluator_config_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + bool + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.evaluators.delete_evaluator_config( + evaluator_config_id="evaluator_config_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/configs/{jsonable_encoder(evaluator_config_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + bool, + parse_obj_as( + type_=bool, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/services/completion-stateless-sdk/agenta/client/backend/observability/__init__.py b/services/completion-stateless-sdk/agenta/client/backend/observability/__init__.py new file mode 100644 index 0000000000..67a41b2742 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/observability/__init__.py @@ -0,0 +1 @@ +# This file was auto-generated by Fern from our API Definition. 
diff --git a/services/completion-stateless-sdk/agenta/client/backend/observability/client.py b/services/completion-stateless-sdk/agenta/client/backend/observability/client.py new file mode 100644 index 0000000000..aebe134924 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/observability/client.py @@ -0,0 +1,1280 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ..core.client_wrapper import SyncClientWrapper +from ..core.request_options import RequestOptions +from ..core.pydantic_utilities import parse_obj_as +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..types.create_span import CreateSpan +from ..types.create_trace_response import CreateTraceResponse +from ..core.serialization import convert_and_respect_annotation_metadata +from ..types.with_pagination import WithPagination +from ..types.trace_detail import TraceDetail +from ..core.jsonable_encoder import jsonable_encoder +from ..types.span_detail import SpanDetail +from ..core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class ObservabilityClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def dashboard( + self, + *, + app_id: str, + time_range: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + variant: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + app_id : str + + time_range : typing.Optional[str] + + environment : typing.Optional[str] + + variant : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.observability.dashboard( + app_id="app_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "observability/dashboard/", + method="GET", + params={ + "app_id": app_id, + "timeRange": time_range, + "environment": environment, + "variant": variant, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create_traces( + self, + *, + trace: str, + spans: typing.Sequence[CreateSpan], + request_options: typing.Optional[RequestOptions] = None, + ) -> CreateTraceResponse: + """ + Parameters + ---------- + trace : str + + spans : typing.Sequence[CreateSpan] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CreateTraceResponse + Successful Response + + Examples + -------- + import datetime + + from agenta import AgentaApi, CreateSpan + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.observability.create_traces( + trace="trace", + spans=[ + CreateSpan( + id="id", + app_id="app_id", + name="name", + spankind="spankind", + status="status", + start_time=datetime.datetime.fromisoformat( + "2024-01-15 09:30:00+00:00", + ), + end_time=datetime.datetime.fromisoformat( + "2024-01-15 09:30:00+00:00", + ), + ) + ], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "observability/trace/", + method="POST", + json={ + "trace": trace, + "spans": convert_and_respect_annotation_metadata( + object_=spans, + annotation=typing.Sequence[CreateSpan], + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CreateTraceResponse, + parse_obj_as( + type_=CreateTraceResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_traces( + self, + *, + app_id: str, + page: typing.Optional[int] = None, + page_size: typing.Optional[int] = None, + type: typing.Optional[str] = None, + trace_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + variant: typing.Optional[str] = None, + created_at: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> WithPagination: + """ + Parameters + ---------- + app_id : str + + page : 
typing.Optional[int] + + page_size : typing.Optional[int] + + type : typing.Optional[str] + + trace_id : typing.Optional[str] + + environment : typing.Optional[str] + + variant : typing.Optional[str] + + created_at : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + WithPagination + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.observability.get_traces( + app_id="app_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "observability/traces/", + method="GET", + params={ + "app_id": app_id, + "page": page, + "pageSize": page_size, + "type": type, + "trace_id": trace_id, + "environment": environment, + "variant": variant, + "created_at": created_at, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + WithPagination, + parse_obj_as( + type_=WithPagination, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_traces_legacy( + self, + *, + request: typing.Sequence[str], + request_options: typing.Optional[RequestOptions] = None, + ) -> bool: + """ + Parameters + ---------- + request : typing.Sequence[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + bool + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.observability.delete_traces_legacy( + request=["string"], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "observability/traces/", + method="DELETE", + json=request, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + bool, + parse_obj_as( + type_=bool, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_trace_detail( + self, trace_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> TraceDetail: + """ + Parameters + ---------- + trace_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + TraceDetail + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.observability.get_trace_detail( + trace_id="trace_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"observability/traces/{jsonable_encoder(trace_id)}/", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + TraceDetail, + parse_obj_as( + type_=TraceDetail, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_spans_of_generation( + self, + *, + app_id: str, + page: typing.Optional[int] = None, + page_size: typing.Optional[int] = None, + type: typing.Optional[str] = None, + trace_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + variant: typing.Optional[str] = None, + created_at: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + app_id : str + + page : typing.Optional[int] + + page_size : typing.Optional[int] + + type : typing.Optional[str] + + trace_id : typing.Optional[str] + + environment : typing.Optional[str] + + variant : typing.Optional[str] + + created_at : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.observability.get_spans_of_generation( + app_id="app_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "observability/spans/", + method="GET", + params={ + "app_id": app_id, + "page": page, + "pageSize": page_size, + "type": type, + "trace_id": trace_id, + "environment": environment, + "variant": variant, + "created_at": created_at, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_spans_of_trace( + self, + *, + request: typing.Sequence[str], + request_options: typing.Optional[RequestOptions] = None, + ) -> bool: + """ + Parameters + ---------- + request : typing.Sequence[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + bool + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.observability.delete_spans_of_trace( + request=["string"], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "observability/spans/", + method="DELETE", + json=request, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + bool, + parse_obj_as( + type_=bool, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_span_of_generation( + self, + span_id: str, + *, + type: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> SpanDetail: + """ + Parameters + ---------- + span_id : str + + type : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + SpanDetail + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.observability.get_span_of_generation( + span_id="span_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"observability/spans/{jsonable_encoder(span_id)}/", + method="GET", + params={ + "type": type, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + SpanDetail, + parse_obj_as( + type_=SpanDetail, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncObservabilityClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def dashboard( + self, + *, + app_id: str, + time_range: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + variant: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + app_id : str + + time_range : typing.Optional[str] + + environment : typing.Optional[str] + + variant : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.observability.dashboard( + app_id="app_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "observability/dashboard/", + method="GET", + params={ + "app_id": app_id, + "timeRange": time_range, + "environment": environment, + "variant": variant, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def create_traces( + self, + *, + trace: str, + spans: typing.Sequence[CreateSpan], + request_options: typing.Optional[RequestOptions] = None, + ) -> CreateTraceResponse: + """ + Parameters + ---------- + trace : str + + spans : typing.Sequence[CreateSpan] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CreateTraceResponse + Successful Response + + Examples + -------- + import asyncio + import datetime + + from agenta import AsyncAgentaApi, CreateSpan + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.observability.create_traces( + trace="trace", + spans=[ + CreateSpan( + id="id", + app_id="app_id", + name="name", + spankind="spankind", + status="status", + start_time=datetime.datetime.fromisoformat( + "2024-01-15 09:30:00+00:00", + ), + end_time=datetime.datetime.fromisoformat( + "2024-01-15 09:30:00+00:00", + ), + ) + ], + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "observability/trace/", + method="POST", + json={ + "trace": trace, + "spans": convert_and_respect_annotation_metadata( + object_=spans, + annotation=typing.Sequence[CreateSpan], + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CreateTraceResponse, + parse_obj_as( + type_=CreateTraceResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_traces( + self, + *, + app_id: str, + page: typing.Optional[int] = None, + page_size: typing.Optional[int] = None, + type: typing.Optional[str] = None, + trace_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + variant: typing.Optional[str] = None, + created_at: typing.Optional[str] = None, + request_options: 
typing.Optional[RequestOptions] = None, + ) -> WithPagination: + """ + Parameters + ---------- + app_id : str + + page : typing.Optional[int] + + page_size : typing.Optional[int] + + type : typing.Optional[str] + + trace_id : typing.Optional[str] + + environment : typing.Optional[str] + + variant : typing.Optional[str] + + created_at : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + WithPagination + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.observability.get_traces( + app_id="app_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "observability/traces/", + method="GET", + params={ + "app_id": app_id, + "page": page, + "pageSize": page_size, + "type": type, + "trace_id": trace_id, + "environment": environment, + "variant": variant, + "created_at": created_at, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + WithPagination, + parse_obj_as( + type_=WithPagination, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_traces_legacy( + self, + *, + request: typing.Sequence[str], + request_options: typing.Optional[RequestOptions] = None, + ) -> bool: + """ + Parameters + ---------- + request : typing.Sequence[str] + + request_options : 
typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + bool + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.observability.delete_traces_legacy( + request=["string"], + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "observability/traces/", + method="DELETE", + json=request, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + bool, + parse_obj_as( + type_=bool, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_trace_detail( + self, trace_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> TraceDetail: + """ + Parameters + ---------- + trace_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + TraceDetail + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.observability.get_trace_detail( + trace_id="trace_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"observability/traces/{jsonable_encoder(trace_id)}/", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + TraceDetail, + parse_obj_as( + type_=TraceDetail, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_spans_of_generation( + self, + *, + app_id: str, + page: typing.Optional[int] = None, + page_size: typing.Optional[int] = None, + type: typing.Optional[str] = None, + trace_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + variant: typing.Optional[str] = None, + created_at: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + app_id : str + + page : typing.Optional[int] + + page_size : typing.Optional[int] + + type : typing.Optional[str] + + trace_id : typing.Optional[str] + + environment : typing.Optional[str] + + variant : typing.Optional[str] + + created_at : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.observability.get_spans_of_generation( + app_id="app_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "observability/spans/", + method="GET", + params={ + "app_id": app_id, + "page": page, + "pageSize": page_size, + "type": type, + "trace_id": trace_id, + "environment": environment, + "variant": variant, + "created_at": created_at, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_spans_of_trace( + self, + *, + request: typing.Sequence[str], + request_options: typing.Optional[RequestOptions] = None, + ) -> bool: + """ + Parameters + ---------- + request : typing.Sequence[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + bool + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.observability.delete_spans_of_trace( + request=["string"], + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "observability/spans/", + method="DELETE", + json=request, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + bool, + parse_obj_as( + type_=bool, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_span_of_generation( + self, + span_id: str, + *, + type: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> SpanDetail: + """ + Parameters + ---------- + span_id : str + + type : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + SpanDetail + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.observability.get_span_of_generation( + span_id="span_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"observability/spans/{jsonable_encoder(span_id)}/", + method="GET", + params={ + "type": type, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + SpanDetail, + parse_obj_as( + type_=SpanDetail, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/services/completion-stateless-sdk/agenta/client/backend/observability_v_1/__init__.py b/services/completion-stateless-sdk/agenta/client/backend/observability_v_1/__init__.py new file mode 100644 index 0000000000..aceeca0c75 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/observability_v_1/__init__.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .types import Format, QueryTracesResponse + +__all__ = ["Format", "QueryTracesResponse"] diff --git a/services/completion-stateless-sdk/agenta/client/backend/observability_v_1/client.py b/services/completion-stateless-sdk/agenta/client/backend/observability_v_1/client.py new file mode 100644 index 0000000000..5fa38f8c3c --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/observability_v_1/client.py @@ -0,0 +1,560 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.client_wrapper import SyncClientWrapper +import typing +from ..core.request_options import RequestOptions +from ..types.collect_status_response import CollectStatusResponse +from ..core.pydantic_utilities import parse_obj_as +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from .types.format import Format +from .types.query_traces_response import QueryTracesResponse +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from ..core.client_wrapper import AsyncClientWrapper + + +class ObservabilityV1Client: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def otlp_status( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> CollectStatusResponse: + """ + Status of OTLP endpoint. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CollectStatusResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.observability_v_1.otlp_status() + """ + _response = self._client_wrapper.httpx_client.request( + "observability/v1/otlp/traces", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CollectStatusResponse, + parse_obj_as( + type_=CollectStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def otlp_receiver( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> CollectStatusResponse: + """ + Receive traces via OTLP. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CollectStatusResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.observability_v_1.otlp_receiver() + """ + _response = self._client_wrapper.httpx_client.request( + "observability/v1/otlp/traces", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CollectStatusResponse, + parse_obj_as( + type_=CollectStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def query_traces( + self, + *, + format: typing.Optional[Format] = None, + focus: typing.Optional[str] = None, + oldest: typing.Optional[str] = None, + newest: typing.Optional[str] = None, + filtering: typing.Optional[str] = None, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + next: typing.Optional[str] = None, + stop: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> QueryTracesResponse: + """ + Query traces, with optional grouping, windowing, filtering, and pagination. + + Parameters + ---------- + format : typing.Optional[Format] + + focus : typing.Optional[str] + + oldest : typing.Optional[str] + + newest : typing.Optional[str] + + filtering : typing.Optional[str] + + page : typing.Optional[int] + + size : typing.Optional[int] + + next : typing.Optional[str] + + stop : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + QueryTracesResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.observability_v_1.query_traces() + """ + _response = self._client_wrapper.httpx_client.request( + "observability/v1/traces", + method="GET", + params={ + "format": format, + "focus": focus, + "oldest": oldest, + "newest": newest, + "filtering": filtering, + "page": page, + "size": size, + "next": next, + "stop": stop, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + QueryTracesResponse, + parse_obj_as( + type_=QueryTracesResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_traces( + self, + *, + node_id: typing.Optional[str] = None, + node_ids: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> CollectStatusResponse: + """ + Delete trace. + + Parameters + ---------- + node_id : typing.Optional[str] + + node_ids : typing.Optional[typing.Union[str, typing.Sequence[str]]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CollectStatusResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.observability_v_1.delete_traces() + """ + _response = self._client_wrapper.httpx_client.request( + "observability/v1/traces", + method="DELETE", + params={ + "node_id": node_id, + "node_ids": node_ids, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CollectStatusResponse, + parse_obj_as( + type_=CollectStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncObservabilityV1Client: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def otlp_status( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> CollectStatusResponse: + """ + Status of OTLP endpoint. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CollectStatusResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.observability_v_1.otlp_status() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "observability/v1/otlp/traces", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CollectStatusResponse, + parse_obj_as( + type_=CollectStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def otlp_receiver( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> CollectStatusResponse: + """ + Receive traces via OTLP. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CollectStatusResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.observability_v_1.otlp_receiver() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "observability/v1/otlp/traces", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CollectStatusResponse, + parse_obj_as( + type_=CollectStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def query_traces( + self, + *, + format: typing.Optional[Format] = None, + focus: typing.Optional[str] = None, + oldest: typing.Optional[str] = None, + newest: typing.Optional[str] = None, + filtering: typing.Optional[str] = None, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + next: typing.Optional[str] = None, + stop: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> QueryTracesResponse: + """ + Query traces, with optional grouping, windowing, filtering, and pagination. + + Parameters + ---------- + format : typing.Optional[Format] + + focus : typing.Optional[str] + + oldest : typing.Optional[str] + + newest : typing.Optional[str] + + filtering : typing.Optional[str] + + page : typing.Optional[int] + + size : typing.Optional[int] + + next : typing.Optional[str] + + stop : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + QueryTracesResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.observability_v_1.query_traces() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "observability/v1/traces", + method="GET", + params={ + "format": format, + "focus": focus, + "oldest": oldest, + "newest": newest, + "filtering": filtering, + "page": page, + "size": size, + "next": next, + "stop": stop, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + QueryTracesResponse, + parse_obj_as( + type_=QueryTracesResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_traces( + self, + *, + node_id: typing.Optional[str] = None, + node_ids: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> CollectStatusResponse: + """ + Delete trace. + + Parameters + ---------- + node_id : typing.Optional[str] + + node_ids : typing.Optional[typing.Union[str, typing.Sequence[str]]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CollectStatusResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.observability_v_1.delete_traces() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "observability/v1/traces", + method="DELETE", + params={ + "node_id": node_id, + "node_ids": node_ids, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CollectStatusResponse, + parse_obj_as( + type_=CollectStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/services/completion-stateless-sdk/agenta/client/backend/observability_v_1/types/__init__.py b/services/completion-stateless-sdk/agenta/client/backend/observability_v_1/types/__init__.py new file mode 100644 index 0000000000..7303a90f08 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/observability_v_1/types/__init__.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .format import Format +from .query_traces_response import QueryTracesResponse + +__all__ = ["Format", "QueryTracesResponse"] diff --git a/services/completion-stateless-sdk/agenta/client/backend/observability_v_1/types/format.py b/services/completion-stateless-sdk/agenta/client/backend/observability_v_1/types/format.py new file mode 100644 index 0000000000..ed6f7db216 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/observability_v_1/types/format.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +Format = typing.Union[typing.Literal["opentelemetry", "agenta"], typing.Any] diff --git a/services/completion-stateless-sdk/agenta/client/backend/observability_v_1/types/query_traces_response.py b/services/completion-stateless-sdk/agenta/client/backend/observability_v_1/types/query_traces_response.py new file mode 100644 index 0000000000..4219a5b7e9 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/observability_v_1/types/query_traces_response.py @@ -0,0 +1,11 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...types.o_tel_spans_response import OTelSpansResponse +from ...types.agenta_nodes_response import AgentaNodesResponse +from ...types.agenta_trees_response import AgentaTreesResponse +from ...types.agenta_roots_response import AgentaRootsResponse + +QueryTracesResponse = typing.Union[ + OTelSpansResponse, AgentaNodesResponse, AgentaTreesResponse, AgentaRootsResponse +] diff --git a/services/completion-stateless-sdk/agenta/client/backend/testsets/__init__.py b/services/completion-stateless-sdk/agenta/client/backend/testsets/__init__.py new file mode 100644 index 0000000000..67a41b2742 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/testsets/__init__.py @@ -0,0 +1 @@ +# This file was auto-generated by Fern from our API Definition. 
diff --git a/services/completion-stateless-sdk/agenta/client/backend/testsets/client.py b/services/completion-stateless-sdk/agenta/client/backend/testsets/client.py new file mode 100644 index 0000000000..fc10205700 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/testsets/client.py @@ -0,0 +1,1100 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ..core.client_wrapper import SyncClientWrapper +from .. import core +from ..core.request_options import RequestOptions +from ..types.test_set_simple_response import TestSetSimpleResponse +from ..core.pydantic_utilities import parse_obj_as +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..core.jsonable_encoder import jsonable_encoder +from ..types.test_set_output_response import TestSetOutputResponse +from ..core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class TestsetsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def upload_file( + self, + *, + file: core.File, + upload_type: typing.Optional[str] = OMIT, + testset_name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> TestSetSimpleResponse: + """ + Uploads a CSV or JSON file and saves its data to MongoDB. + + Args: + upload_type : Either a json or csv file. + file (UploadFile): The CSV or JSON file to upload. + testset_name (Optional): the name of the testset if provided. + + Returns: + dict: The result of the upload process. 
+ + Parameters + ---------- + file : core.File + See core.File for more documentation + + upload_type : typing.Optional[str] + + testset_name : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + TestSetSimpleResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.testsets.upload_file() + """ + _response = self._client_wrapper.httpx_client.request( + "testsets/upload", + method="POST", + data={ + "upload_type": upload_type, + "testset_name": testset_name, + }, + files={ + "file": file, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + TestSetSimpleResponse, + parse_obj_as( + type_=TestSetSimpleResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def import_testset( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> TestSetSimpleResponse: + """ + Import JSON testset data from an endpoint and save it to MongoDB. + + Args: + endpoint (str): An endpoint URL to import data from. + testset_name (str): the name of the testset if provided. + + Returns: + dict: The result of the import process. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + TestSetSimpleResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.testsets.import_testset() + """ + _response = self._client_wrapper.httpx_client.request( + "testsets/endpoint", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + TestSetSimpleResponse, + parse_obj_as( + type_=TestSetSimpleResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create_testset( + self, + *, + name: str, + csvdata: typing.Sequence[typing.Dict[str, typing.Optional[typing.Any]]], + request_options: typing.Optional[RequestOptions] = None, + ) -> TestSetSimpleResponse: + """ + Create a testset with given name, save the testset to MongoDB. + + Args: + name (str): name of the test set. + testset (Dict[str, str]): test set data. + + Returns: + str: The id of the test set created. + + Parameters + ---------- + name : str + + csvdata : typing.Sequence[typing.Dict[str, typing.Optional[typing.Any]]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + TestSetSimpleResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.testsets.create_testset( + name="name", + csvdata=[{"key": "value"}], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "testsets", + method="POST", + json={ + "name": name, + "csvdata": csvdata, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + TestSetSimpleResponse, + parse_obj_as( + type_=TestSetSimpleResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_single_testset( + self, + testset_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Fetch a specific testset in a MongoDB collection using its \_id. + + Args: + testset_id (str): The \_id of the testset to fetch. + + Returns: + The requested testset if found, else an HTTPException. + + Parameters + ---------- + testset_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.testsets.get_single_testset( + testset_id="testset_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"testsets/{jsonable_encoder(testset_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_testset( + self, + testset_id: str, + *, + name: str, + csvdata: typing.Sequence[typing.Dict[str, typing.Optional[typing.Any]]], + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Update a testset with given id, update the testset in MongoDB. + + Args: + testset_id (str): id of the test set to be updated. + csvdata (NewTestset): New data to replace the old testset. + + Returns: + str: The id of the test set updated. + + Parameters + ---------- + testset_id : str + + name : str + + csvdata : typing.Sequence[typing.Dict[str, typing.Optional[typing.Any]]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.testsets.update_testset( + testset_id="testset_id", + name="name", + csvdata=[{"key": "value"}], + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"testsets/{jsonable_encoder(testset_id)}", + method="PUT", + json={ + "name": name, + "csvdata": csvdata, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_testsets( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[TestSetOutputResponse]: + """ + Get all testsets. + + Returns: + + - A list of testset objects. + + Raises: + + - `HTTPException` with status code 404 if no testsets are found. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[TestSetOutputResponse] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.testsets.get_testsets() + """ + _response = self._client_wrapper.httpx_client.request( + "testsets", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[TestSetOutputResponse], + parse_obj_as( + type_=typing.List[TestSetOutputResponse], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_testsets( + self, + *, + testset_ids: typing.Sequence[str], + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[str]: + """ + Delete specific testsets based on their unique IDs. + + Args: + testset_ids (List[str]): The unique identifiers of the testsets to delete. + + Returns: + A list of the deleted testsets' IDs. + + Parameters + ---------- + testset_ids : typing.Sequence[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[str] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.testsets.delete_testsets( + testset_ids=["testset_ids"], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "testsets", + method="DELETE", + json={ + "testset_ids": testset_ids, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[str], + parse_obj_as( + type_=typing.List[str], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncTestsetsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def upload_file( + self, + *, + file: core.File, + upload_type: typing.Optional[str] = OMIT, + testset_name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> TestSetSimpleResponse: + """ + Uploads a CSV or JSON file and saves its data to MongoDB. + + Args: + upload_type : Either a json or csv file. + file (UploadFile): The CSV or JSON file to upload. + testset_name (Optional): the name of the testset if provided. + + Returns: + dict: The result of the upload process. + + Parameters + ---------- + file : core.File + See core.File for more documentation + + upload_type : typing.Optional[str] + + testset_name : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + TestSetSimpleResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.testsets.upload_file() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "testsets/upload", + method="POST", + data={ + "upload_type": upload_type, + "testset_name": testset_name, + }, + files={ + "file": file, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + TestSetSimpleResponse, + parse_obj_as( + type_=TestSetSimpleResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def import_testset( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> TestSetSimpleResponse: + """ + Import JSON testset data from an endpoint and save it to MongoDB. + + Args: + endpoint (str): An endpoint URL to import data from. + testset_name (str): the name of the testset if provided. + + Returns: + dict: The result of the import process. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + TestSetSimpleResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.testsets.import_testset() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "testsets/endpoint", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + TestSetSimpleResponse, + parse_obj_as( + type_=TestSetSimpleResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def create_testset( + self, + *, + name: str, + csvdata: typing.Sequence[typing.Dict[str, typing.Optional[typing.Any]]], + request_options: typing.Optional[RequestOptions] = None, + ) -> TestSetSimpleResponse: + """ + Create a testset with given name, save the testset to MongoDB. + + Args: + name (str): name of the test set. + testset (Dict[str, str]): test set data. + + Returns: + str: The id of the test set created. + + Parameters + ---------- + name : str + + csvdata : typing.Sequence[typing.Dict[str, typing.Optional[typing.Any]]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + TestSetSimpleResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.testsets.create_testset( + name="name", + csvdata=[{"key": "value"}], + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "testsets", + method="POST", + json={ + "name": name, + "csvdata": csvdata, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + TestSetSimpleResponse, + parse_obj_as( + type_=TestSetSimpleResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_single_testset( + self, + testset_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Fetch a specific testset in a MongoDB collection using its \_id. + + Args: + testset_id (str): The \_id of the testset to fetch. + + Returns: + The requested testset if found, else an HTTPException. + + Parameters + ---------- + testset_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.testsets.get_single_testset( + testset_id="testset_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"testsets/{jsonable_encoder(testset_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_testset( + self, + testset_id: str, + *, + name: str, + csvdata: typing.Sequence[typing.Dict[str, typing.Optional[typing.Any]]], + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Update a testset with given id, update the testset in MongoDB. + + Args: + testset_id (str): id of the test set to be updated. + csvdata (NewTestset): New data to replace the old testset. + + Returns: + str: The id of the test set updated. + + Parameters + ---------- + testset_id : str + + name : str + + csvdata : typing.Sequence[typing.Dict[str, typing.Optional[typing.Any]]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.testsets.update_testset( + testset_id="testset_id", + name="name", + csvdata=[{"key": "value"}], + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"testsets/{jsonable_encoder(testset_id)}", + method="PUT", + json={ + "name": name, + "csvdata": csvdata, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_testsets( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[TestSetOutputResponse]: + """ + Get all testsets. + + Returns: + + - A list of testset objects. + + Raises: + + - `HTTPException` with status code 404 if no testsets are found. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[TestSetOutputResponse] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.testsets.get_testsets() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "testsets", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[TestSetOutputResponse], + parse_obj_as( + type_=typing.List[TestSetOutputResponse], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_testsets( + self, + *, + testset_ids: typing.Sequence[str], + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[str]: + """ + Delete specific testsets based on their unique IDs. + + Args: + testset_ids (List[str]): The unique identifiers of the testsets to delete. + + Returns: + A list of the deleted testsets' IDs. + + Parameters + ---------- + testset_ids : typing.Sequence[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[str] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.testsets.delete_testsets( + testset_ids=["testset_ids"], + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "testsets", + method="DELETE", + json={ + "testset_ids": testset_ids, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[str], + parse_obj_as( + type_=typing.List[str], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/__init__.py b/services/completion-stateless-sdk/agenta/client/backend/types/__init__.py new file mode 100644 index 0000000000..b10c09c61b --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/__init__.py @@ -0,0 +1,219 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .agenta_node_dto import AgentaNodeDto +from .agenta_node_dto_nodes_value import AgentaNodeDtoNodesValue +from .agenta_nodes_response import AgentaNodesResponse +from .agenta_root_dto import AgentaRootDto +from .agenta_roots_response import AgentaRootsResponse +from .agenta_tree_dto import AgentaTreeDto +from .agenta_trees_response import AgentaTreesResponse +from .aggregated_result import AggregatedResult +from .aggregated_result_evaluator_config import AggregatedResultEvaluatorConfig +from .app import App +from .app_variant_response import AppVariantResponse +from .app_variant_revision import AppVariantRevision +from .base_output import BaseOutput +from .body_import_testset import BodyImportTestset +from .collect_status_response import CollectStatusResponse +from .config_db import ConfigDb +from .config_dto import ConfigDto +from .config_response_model import ConfigResponseModel +from .correct_answer import CorrectAnswer +from .create_app_output import CreateAppOutput +from .create_span import CreateSpan +from .create_trace_response import CreateTraceResponse +from .docker_env_vars import DockerEnvVars +from .environment_output import EnvironmentOutput +from .environment_output_extended import EnvironmentOutputExtended +from .environment_revision import EnvironmentRevision +from .error import Error +from .evaluation import Evaluation +from .evaluation_scenario import EvaluationScenario +from .evaluation_scenario_input import EvaluationScenarioInput +from .evaluation_scenario_output import EvaluationScenarioOutput +from .evaluation_scenario_result import EvaluationScenarioResult +from .evaluation_scenario_score_update import EvaluationScenarioScoreUpdate +from .evaluation_status_enum import EvaluationStatusEnum +from .evaluation_type import EvaluationType +from .evaluator import Evaluator +from .evaluator_config import EvaluatorConfig +from .evaluator_mapping_output_interface import EvaluatorMappingOutputInterface +from .evaluator_output_interface import 
EvaluatorOutputInterface +from .exception_dto import ExceptionDto +from .get_config_response import GetConfigResponse +from .http_validation_error import HttpValidationError +from .human_evaluation import HumanEvaluation +from .human_evaluation_scenario import HumanEvaluationScenario +from .human_evaluation_scenario_input import HumanEvaluationScenarioInput +from .human_evaluation_scenario_output import HumanEvaluationScenarioOutput +from .human_evaluation_scenario_update import HumanEvaluationScenarioUpdate +from .human_evaluation_update import HumanEvaluationUpdate +from .image import Image +from .invite_request import InviteRequest +from .lifecycle_dto import LifecycleDto +from .link_dto import LinkDto +from .list_api_keys_response import ListApiKeysResponse +from .llm_run_rate_limit import LlmRunRateLimit +from .llm_tokens import LlmTokens +from .lm_providers_enum import LmProvidersEnum +from .new_human_evaluation import NewHumanEvaluation +from .new_testset import NewTestset +from .node_dto import NodeDto +from .node_type import NodeType +from .o_tel_context_dto import OTelContextDto +from .o_tel_event_dto import OTelEventDto +from .o_tel_extra_dto import OTelExtraDto +from .o_tel_link_dto import OTelLinkDto +from .o_tel_span_dto import OTelSpanDto +from .o_tel_span_kind import OTelSpanKind +from .o_tel_spans_response import OTelSpansResponse +from .o_tel_status_code import OTelStatusCode +from .organization import Organization +from .organization_output import OrganizationOutput +from .outputs import Outputs +from .parent_dto import ParentDto +from .permission import Permission +from .reference_dto import ReferenceDto +from .reference_request_model import ReferenceRequestModel +from .result import Result +from .root_dto import RootDto +from .score import Score +from .simple_evaluation_output import SimpleEvaluationOutput +from .span import Span +from .span_detail import SpanDetail +from .span_dto import SpanDto +from .span_dto_nodes_value import 
SpanDtoNodesValue +from .span_status_code import SpanStatusCode +from .span_variant import SpanVariant +from .status_code import StatusCode +from .status_dto import StatusDto +from .template import Template +from .template_image_info import TemplateImageInfo +from .test_set_output_response import TestSetOutputResponse +from .test_set_simple_response import TestSetSimpleResponse +from .time_dto import TimeDto +from .trace_detail import TraceDetail +from .tree_dto import TreeDto +from .tree_type import TreeType +from .update_app_output import UpdateAppOutput +from .uri import Uri +from .validation_error import ValidationError +from .validation_error_loc_item import ValidationErrorLocItem +from .variant_action import VariantAction +from .variant_action_enum import VariantActionEnum +from .with_pagination import WithPagination +from .workspace_member_response import WorkspaceMemberResponse +from .workspace_permission import WorkspacePermission +from .workspace_response import WorkspaceResponse +from .workspace_role import WorkspaceRole +from .workspace_role_response import WorkspaceRoleResponse + +__all__ = [ + "AgentaNodeDto", + "AgentaNodeDtoNodesValue", + "AgentaNodesResponse", + "AgentaRootDto", + "AgentaRootsResponse", + "AgentaTreeDto", + "AgentaTreesResponse", + "AggregatedResult", + "AggregatedResultEvaluatorConfig", + "App", + "AppVariantResponse", + "AppVariantRevision", + "BaseOutput", + "BodyImportTestset", + "CollectStatusResponse", + "ConfigDb", + "ConfigDto", + "ConfigResponseModel", + "CorrectAnswer", + "CreateAppOutput", + "CreateSpan", + "CreateTraceResponse", + "DockerEnvVars", + "EnvironmentOutput", + "EnvironmentOutputExtended", + "EnvironmentRevision", + "Error", + "Evaluation", + "EvaluationScenario", + "EvaluationScenarioInput", + "EvaluationScenarioOutput", + "EvaluationScenarioResult", + "EvaluationScenarioScoreUpdate", + "EvaluationStatusEnum", + "EvaluationType", + "Evaluator", + "EvaluatorConfig", + "EvaluatorMappingOutputInterface", + 
"EvaluatorOutputInterface", + "ExceptionDto", + "GetConfigResponse", + "HttpValidationError", + "HumanEvaluation", + "HumanEvaluationScenario", + "HumanEvaluationScenarioInput", + "HumanEvaluationScenarioOutput", + "HumanEvaluationScenarioUpdate", + "HumanEvaluationUpdate", + "Image", + "InviteRequest", + "LifecycleDto", + "LinkDto", + "ListApiKeysResponse", + "LlmRunRateLimit", + "LlmTokens", + "LmProvidersEnum", + "NewHumanEvaluation", + "NewTestset", + "NodeDto", + "NodeType", + "OTelContextDto", + "OTelEventDto", + "OTelExtraDto", + "OTelLinkDto", + "OTelSpanDto", + "OTelSpanKind", + "OTelSpansResponse", + "OTelStatusCode", + "Organization", + "OrganizationOutput", + "Outputs", + "ParentDto", + "Permission", + "ReferenceDto", + "ReferenceRequestModel", + "Result", + "RootDto", + "Score", + "SimpleEvaluationOutput", + "Span", + "SpanDetail", + "SpanDto", + "SpanDtoNodesValue", + "SpanStatusCode", + "SpanVariant", + "StatusCode", + "StatusDto", + "Template", + "TemplateImageInfo", + "TestSetOutputResponse", + "TestSetSimpleResponse", + "TimeDto", + "TraceDetail", + "TreeDto", + "TreeType", + "UpdateAppOutput", + "Uri", + "ValidationError", + "ValidationErrorLocItem", + "VariantAction", + "VariantActionEnum", + "WithPagination", + "WorkspaceMemberResponse", + "WorkspacePermission", + "WorkspaceResponse", + "WorkspaceRole", + "WorkspaceRoleResponse", +] diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/agenta_node_dto.py b/services/completion-stateless-sdk/agenta/client/backend/types/agenta_node_dto.py new file mode 100644 index 0000000000..8f8c933eac --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/agenta_node_dto.py @@ -0,0 +1,48 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .lifecycle_dto import LifecycleDto +from .root_dto import RootDto +from .tree_dto import TreeDto +from .node_dto import NodeDto +from .parent_dto import ParentDto +from .time_dto import TimeDto +from .status_dto import StatusDto +from .exception_dto import ExceptionDto +from .link_dto import LinkDto +from .o_tel_extra_dto import OTelExtraDto +from .agenta_node_dto_nodes_value import AgentaNodeDtoNodesValue +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class AgentaNodeDto(UniversalBaseModel): + lifecycle: typing.Optional[LifecycleDto] = None + root: RootDto + tree: TreeDto + node: NodeDto + parent: typing.Optional[ParentDto] = None + time: TimeDto + status: StatusDto + exception: typing.Optional[ExceptionDto] = None + data: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + metrics: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + meta: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + refs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + links: typing.Optional[typing.List[LinkDto]] = None + otel: typing.Optional[OTelExtraDto] = None + nodes: typing.Optional[ + typing.Dict[str, typing.Optional[AgentaNodeDtoNodesValue]] + ] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/agenta_node_dto_nodes_value.py b/services/completion-stateless-sdk/agenta/client/backend/types/agenta_node_dto_nodes_value.py new file mode 100644 index 0000000000..771c4f8e9f --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/agenta_node_dto_nodes_value.py @@ -0,0 +1,6 @@ +# This file was 
auto-generated by Fern from our API Definition. + +import typing +from .span_dto import SpanDto + +AgentaNodeDtoNodesValue = typing.Union[SpanDto, typing.List[SpanDto]] diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/agenta_nodes_response.py b/services/completion-stateless-sdk/agenta/client/backend/types/agenta_nodes_response.py new file mode 100644 index 0000000000..37b8fea8f7 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/agenta_nodes_response.py @@ -0,0 +1,30 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +from ..core.pydantic_utilities import UniversalBaseModel +from .span_dto import SpanDto +import typing +from .agenta_node_dto import AgentaNodeDto +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class AgentaNodesResponse(UniversalBaseModel): + nodes: typing.List[AgentaNodeDto] + version: str + count: typing.Optional[int] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(SpanDto, AgentaNodesResponse=AgentaNodesResponse) diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/agenta_root_dto.py b/services/completion-stateless-sdk/agenta/client/backend/types/agenta_root_dto.py new file mode 100644 index 0000000000..04e57ee7e3 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/agenta_root_dto.py @@ -0,0 +1,30 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations +from ..core.pydantic_utilities import UniversalBaseModel +from .span_dto import SpanDto +from .root_dto import RootDto +import typing +from .agenta_tree_dto import AgentaTreeDto +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class AgentaRootDto(UniversalBaseModel): + root: RootDto + trees: typing.List[AgentaTreeDto] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(SpanDto, AgentaRootDto=AgentaRootDto) diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/agenta_roots_response.py b/services/completion-stateless-sdk/agenta/client/backend/types/agenta_roots_response.py new file mode 100644 index 0000000000..df3d3ba50b --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/agenta_roots_response.py @@ -0,0 +1,30 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations +from ..core.pydantic_utilities import UniversalBaseModel +from .span_dto import SpanDto +import typing +from .agenta_root_dto import AgentaRootDto +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class AgentaRootsResponse(UniversalBaseModel): + roots: typing.List[AgentaRootDto] + version: str + count: typing.Optional[int] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(SpanDto, AgentaRootsResponse=AgentaRootsResponse) diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/agenta_tree_dto.py b/services/completion-stateless-sdk/agenta/client/backend/types/agenta_tree_dto.py new file mode 100644 index 0000000000..9e5ced12b5 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/agenta_tree_dto.py @@ -0,0 +1,30 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations +from ..core.pydantic_utilities import UniversalBaseModel +from .span_dto import SpanDto +from .tree_dto import TreeDto +import typing +from .agenta_node_dto import AgentaNodeDto +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class AgentaTreeDto(UniversalBaseModel): + tree: TreeDto + nodes: typing.List[AgentaNodeDto] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(SpanDto, AgentaTreeDto=AgentaTreeDto) diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/agenta_trees_response.py b/services/completion-stateless-sdk/agenta/client/backend/types/agenta_trees_response.py new file mode 100644 index 0000000000..f6e80b5b51 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/agenta_trees_response.py @@ -0,0 +1,30 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations +from ..core.pydantic_utilities import UniversalBaseModel +from .span_dto import SpanDto +import typing +from .agenta_tree_dto import AgentaTreeDto +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class AgentaTreesResponse(UniversalBaseModel): + trees: typing.List[AgentaTreeDto] + version: str + count: typing.Optional[int] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(SpanDto, AgentaTreesResponse=AgentaTreesResponse) diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/aggregated_result.py b/services/completion-stateless-sdk/agenta/client/backend/types/aggregated_result.py new file mode 100644 index 0000000000..808bddac66 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/aggregated_result.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .aggregated_result_evaluator_config import AggregatedResultEvaluatorConfig +from .result import Result +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class AggregatedResult(UniversalBaseModel): + evaluator_config: AggregatedResultEvaluatorConfig + result: Result + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/aggregated_result_evaluator_config.py b/services/completion-stateless-sdk/agenta/client/backend/types/aggregated_result_evaluator_config.py new file mode 100644 index 0000000000..4a9069b4e8 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/aggregated_result_evaluator_config.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .evaluator_config import EvaluatorConfig + +AggregatedResultEvaluatorConfig = typing.Union[ + EvaluatorConfig, typing.Dict[str, typing.Optional[typing.Any]] +] diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/app.py b/services/completion-stateless-sdk/agenta/client/backend/types/app.py new file mode 100644 index 0000000000..1a16548531 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/app.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class App(UniversalBaseModel): + app_id: str + app_name: str + updated_at: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/app_variant_response.py b/services/completion-stateless-sdk/agenta/client/backend/types/app_variant_response.py new file mode 100644 index 0000000000..d173208b14 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/app_variant_response.py @@ -0,0 +1,36 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class AppVariantResponse(UniversalBaseModel): + app_id: str + app_name: str + variant_id: str + variant_name: str + project_id: str + parameters: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + base_name: str + base_id: str + config_name: str + uri: typing.Optional[str] = None + revision: int + created_at: typing.Optional[str] = None + updated_at: typing.Optional[str] = None + modified_by_id: typing.Optional[str] = None + organization_id: typing.Optional[str] = None + workspace_id: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/app_variant_revision.py 
b/services/completion-stateless-sdk/agenta/client/backend/types/app_variant_revision.py new file mode 100644 index 0000000000..fbb9c6c7f0 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/app_variant_revision.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .config_db import ConfigDb +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class AppVariantRevision(UniversalBaseModel): + revision: int + modified_by: str + config: ConfigDb + created_at: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/base_output.py b/services/completion-stateless-sdk/agenta/client/backend/types/base_output.py new file mode 100644 index 0000000000..ba70c79376 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/base_output.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class BaseOutput(UniversalBaseModel): + base_id: str + base_name: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/body_import_testset.py b/services/completion-stateless-sdk/agenta/client/backend/types/body_import_testset.py new file mode 100644 index 0000000000..12f7ad7453 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/body_import_testset.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class BodyImportTestset(UniversalBaseModel): + endpoint: typing.Optional[str] = None + testset_name: typing.Optional[str] = None + app_id: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/collect_status_response.py b/services/completion-stateless-sdk/agenta/client/backend/types/collect_status_response.py new file mode 100644 index 0000000000..d52eed32ce --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/collect_status_response.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class CollectStatusResponse(UniversalBaseModel): + version: str + status: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/config_db.py b/services/completion-stateless-sdk/agenta/client/backend/types/config_db.py new file mode 100644 index 0000000000..c11ef70b91 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/config_db.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ConfigDb(UniversalBaseModel): + config_name: str + parameters: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/config_dto.py b/services/completion-stateless-sdk/agenta/client/backend/types/config_dto.py new file mode 100644 index 0000000000..6d012c8148 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/config_dto.py @@ -0,0 +1,32 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .reference_dto import ReferenceDto +from .lifecycle_dto import LifecycleDto +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ConfigDto(UniversalBaseModel): + params: typing.Dict[str, typing.Optional[typing.Any]] + url: typing.Optional[str] = None + application_ref: typing.Optional[ReferenceDto] = None + service_ref: typing.Optional[ReferenceDto] = None + variant_ref: typing.Optional[ReferenceDto] = None + environment_ref: typing.Optional[ReferenceDto] = None + application_lifecycle: typing.Optional[LifecycleDto] = None + service_lifecycle: typing.Optional[LifecycleDto] = None + variant_lifecycle: typing.Optional[LifecycleDto] = None + environment_lifecycle: typing.Optional[LifecycleDto] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/config_response_model.py b/services/completion-stateless-sdk/agenta/client/backend/types/config_response_model.py new file mode 100644 index 0000000000..45b6ffb420 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/config_response_model.py @@ -0,0 +1,32 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .reference_dto import ReferenceDto +from .lifecycle_dto import LifecycleDto +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ConfigResponseModel(UniversalBaseModel): + params: typing.Dict[str, typing.Optional[typing.Any]] + url: typing.Optional[str] = None + application_ref: typing.Optional[ReferenceDto] = None + service_ref: typing.Optional[ReferenceDto] = None + variant_ref: typing.Optional[ReferenceDto] = None + environment_ref: typing.Optional[ReferenceDto] = None + application_lifecycle: typing.Optional[LifecycleDto] = None + service_lifecycle: typing.Optional[LifecycleDto] = None + variant_lifecycle: typing.Optional[LifecycleDto] = None + environment_lifecycle: typing.Optional[LifecycleDto] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/correct_answer.py b/services/completion-stateless-sdk/agenta/client/backend/types/correct_answer.py new file mode 100644 index 0000000000..f0d8340c70 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/correct_answer.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class CorrectAnswer(UniversalBaseModel): + key: str + value: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/create_app_output.py b/services/completion-stateless-sdk/agenta/client/backend/types/create_app_output.py new file mode 100644 index 0000000000..0cfa349505 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/create_app_output.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class CreateAppOutput(UniversalBaseModel): + app_id: str + app_name: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/create_span.py b/services/completion-stateless-sdk/agenta/client/backend/types/create_span.py new file mode 100644 index 0000000000..edc44590ad --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/create_span.py @@ -0,0 +1,45 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .outputs import Outputs +import datetime as dt +from .llm_tokens import LlmTokens +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class CreateSpan(UniversalBaseModel): + id: str + app_id: str + project_id: typing.Optional[str] = None + variant_id: typing.Optional[str] = None + variant_name: typing.Optional[str] = None + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + internals: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + outputs: typing.Optional[Outputs] = None + config: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + environment: typing.Optional[str] = None + tags: typing.Optional[typing.List[str]] = None + token_consumption: typing.Optional[int] = None + name: str + parent_span_id: typing.Optional[str] = None + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + spankind: str + status: str + user: typing.Optional[str] = None + start_time: dt.datetime + end_time: dt.datetime + tokens: typing.Optional[LlmTokens] = None + cost: typing.Optional[float] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/create_trace_response.py b/services/completion-stateless-sdk/agenta/client/backend/types/create_trace_response.py new file mode 100644 index 0000000000..41e67fa7ca --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/create_trace_response.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class CreateTraceResponse(UniversalBaseModel): + message: str + data: typing.Dict[str, typing.Optional[typing.Any]] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/docker_env_vars.py b/services/completion-stateless-sdk/agenta/client/backend/types/docker_env_vars.py new file mode 100644 index 0000000000..71da4ea95b --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/docker_env_vars.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class DockerEnvVars(UniversalBaseModel): + env_vars: typing.Dict[str, str] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/environment_output.py b/services/completion-stateless-sdk/agenta/client/backend/types/environment_output.py new file mode 100644 index 0000000000..57d1216bfd --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/environment_output.py @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class EnvironmentOutput(UniversalBaseModel): + name: str + app_id: str + project_id: str + deployed_app_variant_id: typing.Optional[str] = None + deployed_variant_name: typing.Optional[str] = None + deployed_app_variant_revision_id: typing.Optional[str] = None + revision: typing.Optional[int] = None + organization_id: typing.Optional[str] = None + workspace_id: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/environment_output_extended.py b/services/completion-stateless-sdk/agenta/client/backend/types/environment_output_extended.py new file mode 100644 index 0000000000..1b0ef8b470 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/environment_output_extended.py @@ -0,0 +1,31 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .environment_revision import EnvironmentRevision +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class EnvironmentOutputExtended(UniversalBaseModel): + name: str + app_id: str + project_id: str + deployed_app_variant_id: typing.Optional[str] = None + deployed_variant_name: typing.Optional[str] = None + deployed_app_variant_revision_id: typing.Optional[str] = None + revision: typing.Optional[int] = None + revisions: typing.List[EnvironmentRevision] + organization_id: typing.Optional[str] = None + workspace_id: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/environment_revision.py b/services/completion-stateless-sdk/agenta/client/backend/types/environment_revision.py new file mode 100644 index 0000000000..a1e2d29231 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/environment_revision.py @@ -0,0 +1,26 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class EnvironmentRevision(UniversalBaseModel): + id: str + revision: int + modified_by: str + deployed_app_variant_revision: typing.Optional[str] = None + deployment: typing.Optional[str] = None + created_at: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/error.py b/services/completion-stateless-sdk/agenta/client/backend/types/error.py new file mode 100644 index 0000000000..4b47a34d06 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/error.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class Error(UniversalBaseModel): + message: str + stacktrace: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/evaluation.py b/services/completion-stateless-sdk/agenta/client/backend/types/evaluation.py new file mode 100644 index 0000000000..75c947ee11 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/evaluation.py @@ -0,0 +1,39 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .result import Result +from .aggregated_result import AggregatedResult +import datetime as dt +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class Evaluation(UniversalBaseModel): + id: str + app_id: str + project_id: str + variant_ids: typing.List[str] + variant_names: typing.List[str] + variant_revision_ids: typing.List[str] + revisions: typing.List[str] + testset_id: typing.Optional[str] = None + testset_name: typing.Optional[str] = None + status: Result + aggregated_results: typing.List[AggregatedResult] + average_cost: typing.Optional[Result] = None + total_cost: typing.Optional[Result] = None + average_latency: typing.Optional[Result] = None + created_at: dt.datetime + updated_at: dt.datetime + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/evaluation_scenario.py b/services/completion-stateless-sdk/agenta/client/backend/types/evaluation_scenario.py new file mode 100644 index 0000000000..1a856e7a24 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/evaluation_scenario.py @@ -0,0 +1,32 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .evaluation_scenario_input import EvaluationScenarioInput +from .evaluation_scenario_output import EvaluationScenarioOutput +from .correct_answer import CorrectAnswer +from .evaluation_scenario_result import EvaluationScenarioResult +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class EvaluationScenario(UniversalBaseModel): + id: typing.Optional[str] = None + evaluation_id: str + inputs: typing.List[EvaluationScenarioInput] + outputs: typing.List[EvaluationScenarioOutput] + correct_answers: typing.Optional[typing.List[CorrectAnswer]] = None + is_pinned: typing.Optional[bool] = None + note: typing.Optional[str] = None + results: typing.List[EvaluationScenarioResult] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/evaluation_scenario_input.py b/services/completion-stateless-sdk/agenta/client/backend/types/evaluation_scenario_input.py new file mode 100644 index 0000000000..bbc89f33c0 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/evaluation_scenario_input.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class EvaluationScenarioInput(UniversalBaseModel): + name: str + type: str + value: typing.Optional[typing.Any] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/evaluation_scenario_output.py b/services/completion-stateless-sdk/agenta/client/backend/types/evaluation_scenario_output.py new file mode 100644 index 0000000000..4c1f489f59 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/evaluation_scenario_output.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .result import Result +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class EvaluationScenarioOutput(UniversalBaseModel): + result: Result + cost: typing.Optional[float] = None + latency: typing.Optional[float] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/evaluation_scenario_result.py b/services/completion-stateless-sdk/agenta/client/backend/types/evaluation_scenario_result.py new file mode 100644 index 0000000000..8bb8f3e9ea --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/evaluation_scenario_result.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .result import Result +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class EvaluationScenarioResult(UniversalBaseModel): + evaluator_config: str + result: Result + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/evaluation_scenario_score_update.py b/services/completion-stateless-sdk/agenta/client/backend/types/evaluation_scenario_score_update.py new file mode 100644 index 0000000000..f76d9f6d22 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/evaluation_scenario_score_update.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class EvaluationScenarioScoreUpdate(UniversalBaseModel): + score: float + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/evaluation_status_enum.py b/services/completion-stateless-sdk/agenta/client/backend/types/evaluation_status_enum.py new file mode 100644 index 0000000000..96aa452f19 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/evaluation_status_enum.py @@ -0,0 +1,15 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +EvaluationStatusEnum = typing.Union[ + typing.Literal[ + "EVALUATION_INITIALIZED", + "EVALUATION_STARTED", + "EVALUATION_FINISHED", + "EVALUATION_FINISHED_WITH_ERRORS", + "EVALUATION_FAILED", + "EVALUATION_AGGREGATION_FAILED", + ], + typing.Any, +] diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/evaluation_type.py b/services/completion-stateless-sdk/agenta/client/backend/types/evaluation_type.py new file mode 100644 index 0000000000..ef63bb721d --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/evaluation_type.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +EvaluationType = typing.Union[ + typing.Literal["human_a_b_testing", "single_model_test"], typing.Any +] diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/evaluator.py b/services/completion-stateless-sdk/agenta/client/backend/types/evaluator.py new file mode 100644 index 0000000000..bafa1ba61d --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/evaluator.py @@ -0,0 +1,28 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class Evaluator(UniversalBaseModel): + name: str + key: str + direct_use: bool + settings_template: typing.Dict[str, typing.Optional[typing.Any]] + description: typing.Optional[str] = None + oss: typing.Optional[bool] = None + requires_llm_api_keys: typing.Optional[bool] = None + tags: typing.List[str] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/evaluator_config.py b/services/completion-stateless-sdk/agenta/client/backend/types/evaluator_config.py new file mode 100644 index 0000000000..59fe19fc40 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/evaluator_config.py @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class EvaluatorConfig(UniversalBaseModel): + id: str + name: str + project_id: str + evaluator_key: str + settings_values: typing.Optional[ + typing.Dict[str, typing.Optional[typing.Any]] + ] = None + created_at: str + updated_at: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/evaluator_mapping_output_interface.py b/services/completion-stateless-sdk/agenta/client/backend/types/evaluator_mapping_output_interface.py new file mode 100644 index 0000000000..91cf2f87d4 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/evaluator_mapping_output_interface.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class EvaluatorMappingOutputInterface(UniversalBaseModel): + outputs: typing.Dict[str, typing.Optional[typing.Any]] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/evaluator_output_interface.py b/services/completion-stateless-sdk/agenta/client/backend/types/evaluator_output_interface.py new file mode 100644 index 0000000000..acd295c331 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/evaluator_output_interface.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class EvaluatorOutputInterface(UniversalBaseModel): + outputs: typing.Dict[str, typing.Optional[typing.Any]] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/exception_dto.py b/services/completion-stateless-sdk/agenta/client/backend/types/exception_dto.py new file mode 100644 index 0000000000..a3e780d345 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/exception_dto.py @@ -0,0 +1,26 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import datetime as dt +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ExceptionDto(UniversalBaseModel): + timestamp: dt.datetime + type: str + message: typing.Optional[str] = None + stacktrace: typing.Optional[str] = None + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/get_config_response.py b/services/completion-stateless-sdk/agenta/client/backend/types/get_config_response.py new file mode 100644 index 0000000000..6f65d16d13 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/get_config_response.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class GetConfigResponse(UniversalBaseModel): + config_name: str + current_version: int + parameters: typing.Dict[str, typing.Optional[typing.Any]] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/http_validation_error.py b/services/completion-stateless-sdk/agenta/client/backend/types/http_validation_error.py new file mode 100644 index 0000000000..988ebed269 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/http_validation_error.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .validation_error import ValidationError +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class HttpValidationError(UniversalBaseModel): + detail: typing.Optional[typing.List[ValidationError]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/human_evaluation.py b/services/completion-stateless-sdk/agenta/client/backend/types/human_evaluation.py new file mode 100644 index 0000000000..9d9ba8b490 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/human_evaluation.py @@ -0,0 +1,33 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class HumanEvaluation(UniversalBaseModel): + id: str + app_id: str + project_id: str + evaluation_type: str + variant_ids: typing.List[str] + variant_names: typing.List[str] + variants_revision_ids: typing.List[str] + revisions: typing.List[str] + testset_id: str + testset_name: str + status: str + created_at: str + updated_at: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/human_evaluation_scenario.py b/services/completion-stateless-sdk/agenta/client/backend/types/human_evaluation_scenario.py new file mode 100644 index 0000000000..1321dde09b --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/human_evaluation_scenario.py @@ -0,0 +1,32 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .human_evaluation_scenario_input import HumanEvaluationScenarioInput +from .human_evaluation_scenario_output import HumanEvaluationScenarioOutput +from .score import Score +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class HumanEvaluationScenario(UniversalBaseModel): + id: typing.Optional[str] = None + evaluation_id: str + inputs: typing.List[HumanEvaluationScenarioInput] + outputs: typing.List[HumanEvaluationScenarioOutput] + vote: typing.Optional[str] = None + score: typing.Optional[Score] = None + correct_answer: typing.Optional[str] = None + is_pinned: typing.Optional[bool] = None + note: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/human_evaluation_scenario_input.py b/services/completion-stateless-sdk/agenta/client/backend/types/human_evaluation_scenario_input.py new file mode 100644 index 0000000000..e2fa9b4082 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/human_evaluation_scenario_input.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class HumanEvaluationScenarioInput(UniversalBaseModel): + input_name: str + input_value: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/human_evaluation_scenario_output.py b/services/completion-stateless-sdk/agenta/client/backend/types/human_evaluation_scenario_output.py new file mode 100644 index 0000000000..affac190e1 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/human_evaluation_scenario_output.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class HumanEvaluationScenarioOutput(UniversalBaseModel): + variant_id: str + variant_output: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/human_evaluation_scenario_update.py b/services/completion-stateless-sdk/agenta/client/backend/types/human_evaluation_scenario_update.py new file mode 100644 index 0000000000..6bf28fd914 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/human_evaluation_scenario_update.py @@ -0,0 +1,30 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .score import Score +from .human_evaluation_scenario_output import HumanEvaluationScenarioOutput +from .human_evaluation_scenario_input import HumanEvaluationScenarioInput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class HumanEvaluationScenarioUpdate(UniversalBaseModel): + vote: typing.Optional[str] = None + score: typing.Optional[Score] = None + correct_answer: typing.Optional[str] = None + outputs: typing.Optional[typing.List[HumanEvaluationScenarioOutput]] = None + inputs: typing.Optional[typing.List[HumanEvaluationScenarioInput]] = None + is_pinned: typing.Optional[bool] = None + note: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/human_evaluation_update.py b/services/completion-stateless-sdk/agenta/client/backend/types/human_evaluation_update.py new file mode 100644 index 0000000000..78a453195f --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/human_evaluation_update.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .evaluation_status_enum import EvaluationStatusEnum +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class HumanEvaluationUpdate(UniversalBaseModel): + status: typing.Optional[EvaluationStatusEnum] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/image.py b/services/completion-stateless-sdk/agenta/client/backend/types/image.py new file mode 100644 index 0000000000..f8c17d044e --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/image.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class Image(UniversalBaseModel): + type: typing.Optional[str] = None + docker_id: str + tags: str + organization_id: typing.Optional[str] = None + workspace_id: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/invite_request.py b/services/completion-stateless-sdk/agenta/client/backend/types/invite_request.py new file mode 100644 index 0000000000..9c16852671 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/invite_request.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class InviteRequest(UniversalBaseModel): + email: str + roles: typing.List[str] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/lifecycle_dto.py b/services/completion-stateless-sdk/agenta/client/backend/types/lifecycle_dto.py new file mode 100644 index 0000000000..098f83505e --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/lifecycle_dto.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class LifecycleDto(UniversalBaseModel): + created_at: typing.Optional[str] = None + updated_at: typing.Optional[str] = None + updated_by_id: typing.Optional[str] = None + updated_by: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/link_dto.py b/services/completion-stateless-sdk/agenta/client/backend/types/link_dto.py new file mode 100644 index 0000000000..91c76de759 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/link_dto.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .tree_type import TreeType +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class LinkDto(UniversalBaseModel): + type: TreeType = "invocation" + id: str + tree_id: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/list_api_keys_response.py b/services/completion-stateless-sdk/agenta/client/backend/types/list_api_keys_response.py new file mode 100644 index 0000000000..0ed671ca27 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/list_api_keys_response.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ListApiKeysResponse(UniversalBaseModel): + prefix: str + created_at: str + last_used_at: typing.Optional[str] = None + expiration_date: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/llm_run_rate_limit.py b/services/completion-stateless-sdk/agenta/client/backend/types/llm_run_rate_limit.py new file mode 100644 index 0000000000..b0c9f46899 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/llm_run_rate_limit.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class LlmRunRateLimit(UniversalBaseModel): + batch_size: int + max_retries: int + retry_delay: int + delay_between_batches: int + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/llm_tokens.py b/services/completion-stateless-sdk/agenta/client/backend/types/llm_tokens.py new file mode 100644 index 0000000000..7336d8d561 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/llm_tokens.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class LlmTokens(UniversalBaseModel): + prompt_tokens: typing.Optional[int] = None + completion_tokens: typing.Optional[int] = None + total_tokens: typing.Optional[int] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/lm_providers_enum.py b/services/completion-stateless-sdk/agenta/client/backend/types/lm_providers_enum.py new file mode 100644 index 0000000000..6aa756ba0e --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/lm_providers_enum.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +LmProvidersEnum = typing.Union[ + typing.Literal[ + "OPENAI_API_KEY", + "MISTRAL_API_KEY", + "COHERE_API_KEY", + "ANTHROPIC_API_KEY", + "ANYSCALE_API_KEY", + "PERPLEXITYAI_API_KEY", + "DEEPINFRA_API_KEY", + "TOGETHERAI_API_KEY", + "ALEPHALPHA_API_KEY", + "OPENROUTER_API_KEY", + "GROQ_API_KEY", + "GEMINI_API_KEY", + ], + typing.Any, +] diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/new_human_evaluation.py b/services/completion-stateless-sdk/agenta/client/backend/types/new_human_evaluation.py new file mode 100644 index 0000000000..9fde3d9e3c --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/new_human_evaluation.py @@ -0,0 +1,27 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .evaluation_type import EvaluationType +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class NewHumanEvaluation(UniversalBaseModel): + app_id: str + variant_ids: typing.List[str] + evaluation_type: EvaluationType + inputs: typing.List[str] + testset_id: str + status: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/new_testset.py b/services/completion-stateless-sdk/agenta/client/backend/types/new_testset.py new file mode 100644 index 0000000000..9643d26a1f --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/new_testset.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class NewTestset(UniversalBaseModel): + name: str + csvdata: typing.List[typing.Dict[str, typing.Optional[typing.Any]]] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/node_dto.py b/services/completion-stateless-sdk/agenta/client/backend/types/node_dto.py new file mode 100644 index 0000000000..6caa131c32 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/node_dto.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .node_type import NodeType +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class NodeDto(UniversalBaseModel): + id: str + name: str + type: typing.Optional[NodeType] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/node_type.py b/services/completion-stateless-sdk/agenta/client/backend/types/node_type.py new file mode 100644 index 0000000000..8abbe89309 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/node_type.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +NodeType = typing.Union[ + typing.Literal[ + "agent", + "workflow", + "chain", + "task", + "tool", + "embedding", + "query", + "completion", + "chat", + "rerank", + ], + typing.Any, +] diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/o_tel_context_dto.py b/services/completion-stateless-sdk/agenta/client/backend/types/o_tel_context_dto.py new file mode 100644 index 0000000000..ab99bfac46 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/o_tel_context_dto.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class OTelContextDto(UniversalBaseModel): + trace_id: str + span_id: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/o_tel_event_dto.py b/services/completion-stateless-sdk/agenta/client/backend/types/o_tel_event_dto.py new file mode 100644 index 0000000000..e5eed83822 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/o_tel_event_dto.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class OTelEventDto(UniversalBaseModel): + name: str + timestamp: str + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/o_tel_extra_dto.py b/services/completion-stateless-sdk/agenta/client/backend/types/o_tel_extra_dto.py new file mode 100644 index 0000000000..c7e9294db3 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/o_tel_extra_dto.py @@ -0,0 +1,26 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .o_tel_event_dto import OTelEventDto +from .o_tel_link_dto import OTelLinkDto +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class OTelExtraDto(UniversalBaseModel): + kind: typing.Optional[str] = None + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + events: typing.Optional[typing.List[OTelEventDto]] = None + links: typing.Optional[typing.List[OTelLinkDto]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/o_tel_link_dto.py b/services/completion-stateless-sdk/agenta/client/backend/types/o_tel_link_dto.py new file mode 100644 index 0000000000..75ec3d1f1b --- /dev/null +++ 
b/services/completion-stateless-sdk/agenta/client/backend/types/o_tel_link_dto.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .o_tel_context_dto import OTelContextDto +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class OTelLinkDto(UniversalBaseModel): + context: OTelContextDto + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/o_tel_span_dto.py b/services/completion-stateless-sdk/agenta/client/backend/types/o_tel_span_dto.py new file mode 100644 index 0000000000..66632172c9 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/o_tel_span_dto.py @@ -0,0 +1,37 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .o_tel_context_dto import OTelContextDto +import typing +from .o_tel_span_kind import OTelSpanKind +import datetime as dt +from .o_tel_status_code import OTelStatusCode +from .o_tel_event_dto import OTelEventDto +from .o_tel_link_dto import OTelLinkDto +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class OTelSpanDto(UniversalBaseModel): + context: OTelContextDto + name: str + kind: typing.Optional[OTelSpanKind] = None + start_time: dt.datetime + end_time: dt.datetime + status_code: typing.Optional[OTelStatusCode] = None + status_message: typing.Optional[str] = None + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + events: typing.Optional[typing.List[OTelEventDto]] = None + parent: typing.Optional[OTelContextDto] = None + links: typing.Optional[typing.List[OTelLinkDto]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/o_tel_span_kind.py b/services/completion-stateless-sdk/agenta/client/backend/types/o_tel_span_kind.py new file mode 100644 index 0000000000..98ba7bf43c --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/o_tel_span_kind.py @@ -0,0 +1,15 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +OTelSpanKind = typing.Union[ + typing.Literal[ + "SPAN_KIND_UNSPECIFIED", + "SPAN_KIND_INTERNAL", + "SPAN_KIND_SERVER", + "SPAN_KIND_CLIENT", + "SPAN_KIND_PRODUCER", + "SPAN_KIND_CONSUMER", + ], + typing.Any, +] diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/o_tel_spans_response.py b/services/completion-stateless-sdk/agenta/client/backend/types/o_tel_spans_response.py new file mode 100644 index 0000000000..b9fb641427 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/o_tel_spans_response.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .o_tel_span_dto import OTelSpanDto +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class OTelSpansResponse(UniversalBaseModel): + version: str + count: typing.Optional[int] = None + spans: typing.List[OTelSpanDto] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/o_tel_status_code.py b/services/completion-stateless-sdk/agenta/client/backend/types/o_tel_status_code.py new file mode 100644 index 0000000000..d5a60e6006 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/o_tel_status_code.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +OTelStatusCode = typing.Union[ + typing.Literal["STATUS_CODE_OK", "STATUS_CODE_ERROR", "STATUS_CODE_UNSET"], + typing.Any, +] diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/organization.py b/services/completion-stateless-sdk/agenta/client/backend/types/organization.py new file mode 100644 index 0000000000..c6f12ee0c7 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/organization.py @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class Organization(UniversalBaseModel): + id: str + name: str + description: str + type: typing.Optional[str] = None + owner: str + workspaces: typing.Optional[typing.List[str]] = None + members: typing.Optional[typing.List[str]] = None + invitations: typing.Optional[typing.List[typing.Optional[typing.Any]]] = None + is_paying: typing.Optional[bool] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/organization_output.py b/services/completion-stateless-sdk/agenta/client/backend/types/organization_output.py new file mode 100644 index 0000000000..702802f814 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/organization_output.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class OrganizationOutput(UniversalBaseModel): + id: str + name: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/outputs.py b/services/completion-stateless-sdk/agenta/client/backend/types/outputs.py new file mode 100644 index 0000000000..f719851a4d --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/outputs.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +Outputs = typing.Union[typing.Dict[str, typing.Optional[typing.Any]], typing.List[str]] diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/parent_dto.py b/services/completion-stateless-sdk/agenta/client/backend/types/parent_dto.py new file mode 100644 index 0000000000..7bf3c33715 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/parent_dto.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class ParentDto(UniversalBaseModel): + id: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/permission.py b/services/completion-stateless-sdk/agenta/client/backend/types/permission.py new file mode 100644 index 0000000000..a32616dd16 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/permission.py @@ -0,0 +1,40 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +Permission = typing.Union[ + typing.Literal[ + "read_system", + "view_application", + "edit_application", + "create_application", + "delete_application", + "create_app_variant", + "delete_app_variant", + "modify_variant_configurations", + "delete_application_variant", + "view_app_environment_deployment", + "edit_app_environment_deployment", + "create_app_environment_deployment", + "view_testset", + "edit_testset", + "create_testset", + "delete_testset", + "view_evaluation", + "run_evaluations", + "edit_evaluation", + "create_evaluation", + "delete_evaluation", + "deploy_application", + "view_workspace", + "edit_workspace", + "create_workspace", + "delete_workspace", + "modify_user_roles", + "add_new_user_to_workspace", + "edit_organization", + "delete_organization", + "add_new_user_to_organization", + ], + typing.Any, +] diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/reference_dto.py b/services/completion-stateless-sdk/agenta/client/backend/types/reference_dto.py new file mode 100644 index 0000000000..67fd56b0a8 --- /dev/null +++ 
b/services/completion-stateless-sdk/agenta/client/backend/types/reference_dto.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ReferenceDto(UniversalBaseModel): + slug: typing.Optional[str] = None + version: typing.Optional[int] = None + id: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/reference_request_model.py b/services/completion-stateless-sdk/agenta/client/backend/types/reference_request_model.py new file mode 100644 index 0000000000..91d1ad80d5 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/reference_request_model.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ReferenceRequestModel(UniversalBaseModel): + slug: typing.Optional[str] = None + version: typing.Optional[int] = None + id: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/result.py b/services/completion-stateless-sdk/agenta/client/backend/types/result.py new file mode 100644 index 0000000000..e651345b9c --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/result.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .error import Error +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class Result(UniversalBaseModel): + type: str + value: typing.Optional[typing.Optional[typing.Any]] = None + error: typing.Optional[Error] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/root_dto.py b/services/completion-stateless-sdk/agenta/client/backend/types/root_dto.py new file mode 100644 index 0000000000..7b7e8f5aeb --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/root_dto.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class RootDto(UniversalBaseModel): + id: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/score.py b/services/completion-stateless-sdk/agenta/client/backend/types/score.py new file mode 100644 index 0000000000..8e90b82171 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/score.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +Score = typing.Union[str, int] diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/simple_evaluation_output.py b/services/completion-stateless-sdk/agenta/client/backend/types/simple_evaluation_output.py new file mode 100644 index 0000000000..ae5997391c --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/simple_evaluation_output.py @@ -0,0 +1,26 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .evaluation_type import EvaluationType +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class SimpleEvaluationOutput(UniversalBaseModel): + id: str + variant_ids: typing.List[str] + app_id: str + status: str + evaluation_type: EvaluationType + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/span.py b/services/completion-stateless-sdk/agenta/client/backend/types/span.py new file mode 100644 index 0000000000..5cc2557b24 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/span.py @@ -0,0 +1,42 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +from ..core.pydantic_utilities import UniversalBaseModel +import typing +import datetime as dt +from .span_variant import SpanVariant +from .span_status_code import SpanStatusCode +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class Span(UniversalBaseModel): + id: str + name: str + project_id: typing.Optional[str] = None + parent_span_id: typing.Optional[str] = None + created_at: dt.datetime + variant: SpanVariant + environment: typing.Optional[str] = None + spankind: str + status: SpanStatusCode + metadata: typing.Dict[str, typing.Optional[typing.Any]] + trace_id: str + user_id: typing.Optional[str] = None + content: typing.Dict[str, typing.Optional[typing.Any]] + children: typing.Optional[typing.List["Span"]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + 
else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(Span) diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/span_detail.py b/services/completion-stateless-sdk/agenta/client/backend/types/span_detail.py new file mode 100644 index 0000000000..44c9660657 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/span_detail.py @@ -0,0 +1,44 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +from ..core.pydantic_utilities import UniversalBaseModel +from .span import Span +import typing +import datetime as dt +from .span_variant import SpanVariant +from .span_status_code import SpanStatusCode +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class SpanDetail(UniversalBaseModel): + id: str + name: str + project_id: typing.Optional[str] = None + parent_span_id: typing.Optional[str] = None + created_at: dt.datetime + variant: SpanVariant + environment: typing.Optional[str] = None + spankind: str + status: SpanStatusCode + metadata: typing.Dict[str, typing.Optional[typing.Any]] + trace_id: str + user_id: typing.Optional[str] = None + content: typing.Dict[str, typing.Optional[typing.Any]] + children: typing.Optional[typing.List[Span]] = None + config: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(Span, SpanDetail=SpanDetail) diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/span_dto.py b/services/completion-stateless-sdk/agenta/client/backend/types/span_dto.py new file mode 100644 index 
0000000000..80fdb2b0a3 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/span_dto.py @@ -0,0 +1,54 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .lifecycle_dto import LifecycleDto +from .root_dto import RootDto +from .tree_dto import TreeDto +from .node_dto import NodeDto +from .parent_dto import ParentDto +from .time_dto import TimeDto +from .status_dto import StatusDto +from .exception_dto import ExceptionDto +from .link_dto import LinkDto +from .o_tel_extra_dto import OTelExtraDto +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class SpanDto(UniversalBaseModel): + lifecycle: typing.Optional[LifecycleDto] = None + root: RootDto + tree: TreeDto + node: NodeDto + parent: typing.Optional[ParentDto] = None + time: TimeDto + status: StatusDto + exception: typing.Optional[ExceptionDto] = None + data: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + metrics: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + meta: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + refs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + links: typing.Optional[typing.List[LinkDto]] = None + otel: typing.Optional[OTelExtraDto] = None + nodes: typing.Optional[ + typing.Dict[str, typing.Optional["SpanDtoNodesValue"]] + ] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .span_dto_nodes_value import SpanDtoNodesValue # noqa: E402 + +update_forward_refs(SpanDto) diff --git 
a/services/completion-stateless-sdk/agenta/client/backend/types/span_dto_nodes_value.py b/services/completion-stateless-sdk/agenta/client/backend/types/span_dto_nodes_value.py new file mode 100644 index 0000000000..93e28b70de --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/span_dto_nodes_value.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +import typing +import typing + +if typing.TYPE_CHECKING: + from .span_dto import SpanDto +SpanDtoNodesValue = typing.Union["SpanDto", typing.List["SpanDto"]] diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/span_status_code.py b/services/completion-stateless-sdk/agenta/client/backend/types/span_status_code.py new file mode 100644 index 0000000000..cb5a002953 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/span_status_code.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +SpanStatusCode = typing.Union[typing.Literal["UNSET", "OK", "ERROR"], typing.Any] diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/span_variant.py b/services/completion-stateless-sdk/agenta/client/backend/types/span_variant.py new file mode 100644 index 0000000000..4471b2229c --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/span_variant.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class SpanVariant(UniversalBaseModel): + variant_id: typing.Optional[str] = None + variant_name: typing.Optional[str] = None + revision: typing.Optional[int] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/status_code.py b/services/completion-stateless-sdk/agenta/client/backend/types/status_code.py new file mode 100644 index 0000000000..ab7c307ab7 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/status_code.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +StatusCode = typing.Union[typing.Literal["UNSET", "OK", "ERROR"], typing.Any] diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/status_dto.py b/services/completion-stateless-sdk/agenta/client/backend/types/status_dto.py new file mode 100644 index 0000000000..44f2ef907b --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/status_dto.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .status_code import StatusCode +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class StatusDto(UniversalBaseModel): + code: StatusCode + message: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/template.py b/services/completion-stateless-sdk/agenta/client/backend/types/template.py new file mode 100644 index 0000000000..af02645289 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/template.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .template_image_info import TemplateImageInfo +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class Template(UniversalBaseModel): + id: str + image: TemplateImageInfo + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/template_image_info.py b/services/completion-stateless-sdk/agenta/client/backend/types/template_image_info.py new file mode 100644 index 0000000000..b179682fda --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/template_image_info.py @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +import datetime as dt +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class TemplateImageInfo(UniversalBaseModel): + name: str + size: typing.Optional[int] = None + digest: typing.Optional[str] = None + title: str + description: str + last_pushed: typing.Optional[dt.datetime] = None + repo_name: typing.Optional[str] = None + template_uri: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/test_set_output_response.py b/services/completion-stateless-sdk/agenta/client/backend/types/test_set_output_response.py new file mode 100644 index 0000000000..34023d5c7a --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/test_set_output_response.py @@ -0,0 +1,26 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing_extensions +from ..core.serialization import FieldMetadata +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class TestSetOutputResponse(UniversalBaseModel): + id: typing_extensions.Annotated[str, FieldMetadata(alias="_id")] + name: str + created_at: str + updated_at: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/test_set_simple_response.py b/services/completion-stateless-sdk/agenta/client/backend/types/test_set_simple_response.py new file mode 100644 index 0000000000..659d429caa --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/test_set_simple_response.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class TestSetSimpleResponse(UniversalBaseModel): + id: str + name: str + created_at: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/time_dto.py b/services/completion-stateless-sdk/agenta/client/backend/types/time_dto.py new file mode 100644 index 0000000000..5def8ab023 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/time_dto.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import datetime as dt +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class TimeDto(UniversalBaseModel): + start: dt.datetime + end: dt.datetime + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/trace_detail.py b/services/completion-stateless-sdk/agenta/client/backend/types/trace_detail.py new file mode 100644 index 0000000000..22ba262a83 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/trace_detail.py @@ -0,0 +1,44 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +from ..core.pydantic_utilities import UniversalBaseModel +from .span import Span +import typing +import datetime as dt +from .span_variant import SpanVariant +from .span_status_code import SpanStatusCode +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class TraceDetail(UniversalBaseModel): + id: str + name: str + project_id: typing.Optional[str] = None + parent_span_id: typing.Optional[str] = None + created_at: dt.datetime + variant: SpanVariant + environment: typing.Optional[str] = None + spankind: str + status: SpanStatusCode + metadata: typing.Dict[str, typing.Optional[typing.Any]] + trace_id: str + user_id: typing.Optional[str] = None + content: typing.Dict[str, typing.Optional[typing.Any]] + children: typing.Optional[typing.List[Span]] = None + config: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + 
) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(Span, TraceDetail=TraceDetail) diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/tree_dto.py b/services/completion-stateless-sdk/agenta/client/backend/types/tree_dto.py new file mode 100644 index 0000000000..dfb98faaac --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/tree_dto.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .tree_type import TreeType +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class TreeDto(UniversalBaseModel): + id: str + type: typing.Optional[TreeType] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/tree_type.py b/services/completion-stateless-sdk/agenta/client/backend/types/tree_type.py new file mode 100644 index 0000000000..3be7057bec --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/tree_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +TreeType = typing.Literal["invocation"] diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/update_app_output.py b/services/completion-stateless-sdk/agenta/client/backend/types/update_app_output.py new file mode 100644 index 0000000000..deede4ef37 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/update_app_output.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class UpdateAppOutput(UniversalBaseModel): + app_id: str + app_name: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/uri.py b/services/completion-stateless-sdk/agenta/client/backend/types/uri.py new file mode 100644 index 0000000000..7c9a2fab47 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/uri.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class Uri(UniversalBaseModel): + uri: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/validation_error.py b/services/completion-stateless-sdk/agenta/client/backend/types/validation_error.py new file mode 100644 index 0000000000..4b6d592bda --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/validation_error.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .validation_error_loc_item import ValidationErrorLocItem +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ValidationError(UniversalBaseModel): + loc: typing.List[ValidationErrorLocItem] + msg: str + type: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/validation_error_loc_item.py b/services/completion-stateless-sdk/agenta/client/backend/types/validation_error_loc_item.py new file mode 100644 index 0000000000..9a0a83fef5 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/validation_error_loc_item.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ValidationErrorLocItem = typing.Union[str, int] diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/variant_action.py b/services/completion-stateless-sdk/agenta/client/backend/types/variant_action.py new file mode 100644 index 0000000000..0dec29eea2 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/variant_action.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .variant_action_enum import VariantActionEnum +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class VariantAction(UniversalBaseModel): + action: VariantActionEnum + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/variant_action_enum.py b/services/completion-stateless-sdk/agenta/client/backend/types/variant_action_enum.py new file mode 100644 index 0000000000..1bc746b0fd --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/variant_action_enum.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +VariantActionEnum = typing.Union[typing.Literal["START", "STOP"], typing.Any] diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/with_pagination.py b/services/completion-stateless-sdk/agenta/client/backend/types/with_pagination.py new file mode 100644 index 0000000000..cb02dbd92b --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/with_pagination.py @@ -0,0 +1,26 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +import typing_extensions +from ..core.serialization import FieldMetadata +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class WithPagination(UniversalBaseModel): + data: typing.List[typing.Optional[typing.Any]] + total: int + page: int + page_size: typing_extensions.Annotated[int, FieldMetadata(alias="pageSize")] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/workspace_member_response.py b/services/completion-stateless-sdk/agenta/client/backend/types/workspace_member_response.py new file mode 100644 index 0000000000..60f29c7110 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/workspace_member_response.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .workspace_permission import WorkspacePermission +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class WorkspaceMemberResponse(UniversalBaseModel): + user: typing.Dict[str, typing.Optional[typing.Any]] + roles: typing.List[WorkspacePermission] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/workspace_permission.py b/services/completion-stateless-sdk/agenta/client/backend/types/workspace_permission.py new file mode 100644 index 0000000000..028e712cd6 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/workspace_permission.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .workspace_role import WorkspaceRole +import typing +from .permission import Permission +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class WorkspacePermission(UniversalBaseModel): + role_name: WorkspaceRole + role_description: typing.Optional[str] = None + permissions: typing.Optional[typing.List[Permission]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/workspace_response.py b/services/completion-stateless-sdk/agenta/client/backend/types/workspace_response.py new file mode 100644 index 0000000000..a042d463e8 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/workspace_response.py @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .workspace_member_response import WorkspaceMemberResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class WorkspaceResponse(UniversalBaseModel): + created_at: typing.Optional[str] = None + updated_at: typing.Optional[str] = None + id: str + name: str + description: typing.Optional[str] = None + type: typing.Optional[str] = None + organization: str + members: typing.Optional[typing.List[WorkspaceMemberResponse]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/workspace_role.py b/services/completion-stateless-sdk/agenta/client/backend/types/workspace_role.py new file mode 100644 index 0000000000..065ea0abf3 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/workspace_role.py @@ -0,0 +1,15 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +WorkspaceRole = typing.Union[ + typing.Literal[ + "owner", + "viewer", + "editor", + "evaluator", + "workspace_admin", + "deployment_manager", + ], + typing.Any, +] diff --git a/services/completion-stateless-sdk/agenta/client/backend/types/workspace_role_response.py b/services/completion-stateless-sdk/agenta/client/backend/types/workspace_role_response.py new file mode 100644 index 0000000000..f210591fcb --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/types/workspace_role_response.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .workspace_role import WorkspaceRole +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class WorkspaceRoleResponse(UniversalBaseModel): + role_name: WorkspaceRole + role_description: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/services/completion-stateless-sdk/agenta/client/backend/variants/__init__.py b/services/completion-stateless-sdk/agenta/client/backend/variants/__init__.py new file mode 100644 index 0000000000..71317185ab --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/variants/__init__.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +from .types import AddVariantFromBaseAndConfigResponse + +__all__ = ["AddVariantFromBaseAndConfigResponse"] diff --git a/services/completion-stateless-sdk/agenta/client/backend/variants/client.py b/services/completion-stateless-sdk/agenta/client/backend/variants/client.py new file mode 100644 index 0000000000..389a77f778 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/variants/client.py @@ -0,0 +1,2748 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +from ..core.client_wrapper import SyncClientWrapper +from ..core.request_options import RequestOptions +from .types.add_variant_from_base_and_config_response import ( + AddVariantFromBaseAndConfigResponse, +) +from ..core.pydantic_utilities import parse_obj_as +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..types.app_variant_response import AppVariantResponse +from ..core.jsonable_encoder import jsonable_encoder +from ..types.variant_action import VariantAction +from ..types.docker_env_vars import DockerEnvVars +from ..types.uri import Uri +from ..core.serialization import convert_and_respect_annotation_metadata +from ..types.app_variant_revision import AppVariantRevision +from ..types.reference_request_model import ReferenceRequestModel +from ..types.config_response_model import ConfigResponseModel +from ..types.config_dto import ConfigDto +from ..types.reference_dto import ReferenceDto +from ..core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class VariantsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def add_variant_from_base_and_config( + self, + *, + base_id: str, + new_variant_name: str, + new_config_name: str, + parameters: typing.Dict[str, typing.Optional[typing.Any]], + request_options: typing.Optional[RequestOptions] = None, + ) -> AddVariantFromBaseAndConfigResponse: + """ + Add a new variant based on an existing one. + Same as POST /config + + Args: + payload (AddVariantFromBasePayload): Payload containing base variant ID, new variant name, and parameters. + stoken_session (SessionContainer, optional): Session container. Defaults to result of verify_session(). 
+ + Raises: + HTTPException: Raised if the variant could not be added or accessed. + + Returns: + Union[AppVariantResponse, Any]: New variant details or exception. + + Parameters + ---------- + base_id : str + + new_variant_name : str + + new_config_name : str + + parameters : typing.Dict[str, typing.Optional[typing.Any]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AddVariantFromBaseAndConfigResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.add_variant_from_base_and_config( + base_id="base_id", + new_variant_name="new_variant_name", + new_config_name="new_config_name", + parameters={"key": "value"}, + ) + """ + _response = self._client_wrapper.httpx_client.request( + "variants/from-base", + method="POST", + json={ + "base_id": base_id, + "new_variant_name": new_variant_name, + "new_config_name": new_config_name, + "parameters": parameters, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AddVariantFromBaseAndConfigResponse, + parse_obj_as( + type_=AddVariantFromBaseAndConfigResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_variant( + self, + variant_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> AppVariantResponse: + """ + Parameters + ---------- + variant_id : str + + request_options : 
typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AppVariantResponse + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.get_variant( + variant_id="variant_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AppVariantResponse, + parse_obj_as( + type_=AppVariantResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def start_variant( + self, + variant_id: str, + *, + action: VariantAction, + env_vars: typing.Optional[DockerEnvVars] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> Uri: + """ + Start a variant of an app. + + Args: + variant_id (str): The ID of the variant to start. + action (VariantAction): The action to perform on the variant (start). + env_vars (Optional[DockerEnvVars], optional): The environment variables to inject to the Docker container. Defaults to None. + stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()). + + Returns: + URI: The URL of the started variant. + + Raises: + HTTPException: If the app container cannot be started. 
+ + Parameters + ---------- + variant_id : str + + action : VariantAction + + env_vars : typing.Optional[DockerEnvVars] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + Uri + Successful Response + + Examples + -------- + from agenta import AgentaApi, VariantAction + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.start_variant( + variant_id="variant_id", + action=VariantAction( + action="START", + ), + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}", + method="PUT", + json={ + "action": convert_and_respect_annotation_metadata( + object_=action, annotation=VariantAction, direction="write" + ), + "env_vars": convert_and_respect_annotation_metadata( + object_=env_vars, annotation=DockerEnvVars, direction="write" + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + Uri, + parse_obj_as( + type_=Uri, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def remove_variant( + self, + variant_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Remove a variant from the server. + In the case it's the last variant using the image, stop the container and remove the image. 
+ + Arguments: + app_variant -- AppVariant to remove + + Raises: + HTTPException: If there is a problem removing the app variant + + Parameters + ---------- + variant_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.remove_variant( + variant_id="variant_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_variant_parameters( + self, + variant_id: str, + *, + parameters: typing.Dict[str, typing.Optional[typing.Any]], + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Updates the parameters for an app variant. + + Args: + variant_id (str): The ID of the app variant to update. + payload (UpdateVariantParameterPayload): The payload containing the updated parameters. + stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()). + + Raises: + HTTPException: If there is an error while trying to update the app variant. 
+ + Returns: + JSONResponse: A JSON response containing the updated app variant parameters. + + Parameters + ---------- + variant_id : str + + parameters : typing.Dict[str, typing.Optional[typing.Any]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.update_variant_parameters( + variant_id="variant_id", + parameters={"key": "value"}, + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}/parameters", + method="PUT", + json={ + "parameters": parameters, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_variant_image( + self, + variant_id: str, + *, + docker_id: str, + tags: str, + type: typing.Optional[str] = OMIT, + organization_id: typing.Optional[str] = OMIT, + workspace_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Updates the image used in an app variant. + + Args: + variant_id (str): The ID of the app variant to update. + image (Image): The image information to update. 
+ + Raises: + HTTPException: If an error occurs while trying to update the app variant. + + Returns: + JSONResponse: A JSON response indicating whether the update was successful or not. + + Parameters + ---------- + variant_id : str + + docker_id : str + + tags : str + + type : typing.Optional[str] + + organization_id : typing.Optional[str] + + workspace_id : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.update_variant_image( + variant_id="variant_id", + docker_id="docker_id", + tags="tags", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}/image", + method="PUT", + json={ + "type": type, + "docker_id": docker_id, + "tags": tags, + "organization_id": organization_id, + "workspace_id": workspace_id, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def retrieve_variant_logs( + self, + variant_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + variant_id : str + + request_options : 
typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.retrieve_variant_logs( + variant_id="variant_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}/logs", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_variant_revisions( + self, + variant_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[AppVariantRevision]: + """ + Parameters + ---------- + variant_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[AppVariantRevision] + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.get_variant_revisions( + variant_id="variant_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}/revisions", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[AppVariantRevision], + parse_obj_as( + type_=typing.List[AppVariantRevision], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_variant_revision( + self, + variant_id: str, + revision_number: int, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> AppVariantRevision: + """ + Parameters + ---------- + variant_id : str + + revision_number : int + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AppVariantRevision + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.get_variant_revision( + variant_id="variant_id", + revision_number=1, + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}/revisions/{jsonable_encoder(revision_number)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AppVariantRevision, + parse_obj_as( + type_=AppVariantRevision, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def configs_add( + self, + *, + variant_ref: ReferenceRequestModel, + application_ref: ReferenceRequestModel, + request_options: typing.Optional[RequestOptions] = None, + ) -> ConfigResponseModel: + """ + Parameters + ---------- + variant_ref : ReferenceRequestModel + + application_ref : ReferenceRequestModel + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + ConfigResponseModel + Successful Response + + Examples + -------- + from agenta import AgentaApi, ReferenceRequestModel + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.configs_add( + variant_ref=ReferenceRequestModel(), + application_ref=ReferenceRequestModel(), + ) + """ + _response = self._client_wrapper.httpx_client.request( + "variants/configs/add", + method="POST", + json={ + "variant_ref": convert_and_respect_annotation_metadata( + object_=variant_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ConfigResponseModel, + parse_obj_as( + type_=ConfigResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def configs_fetch( + self, + *, + variant_ref: typing.Optional[ReferenceRequestModel] = OMIT, + environment_ref: typing.Optional[ReferenceRequestModel] = OMIT, + application_ref: typing.Optional[ReferenceRequestModel] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ConfigResponseModel: + """ + Parameters + ---------- + variant_ref : typing.Optional[ReferenceRequestModel] + + environment_ref : typing.Optional[ReferenceRequestModel] + + application_ref : typing.Optional[ReferenceRequestModel] + + request_options : 
typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ConfigResponseModel + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.configs_fetch() + """ + _response = self._client_wrapper.httpx_client.request( + "variants/configs/fetch", + method="POST", + json={ + "variant_ref": convert_and_respect_annotation_metadata( + object_=variant_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "environment_ref": convert_and_respect_annotation_metadata( + object_=environment_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ConfigResponseModel, + parse_obj_as( + type_=ConfigResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def configs_fork( + self, + *, + variant_ref: typing.Optional[ReferenceRequestModel] = OMIT, + environment_ref: typing.Optional[ReferenceRequestModel] = OMIT, + application_ref: typing.Optional[ReferenceRequestModel] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ConfigResponseModel: + """ + Parameters + ---------- + variant_ref : typing.Optional[ReferenceRequestModel] + + environment_ref : 
typing.Optional[ReferenceRequestModel] + + application_ref : typing.Optional[ReferenceRequestModel] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ConfigResponseModel + Successful Response + + Examples + -------- + from agenta import AgentaApi + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.configs_fork() + """ + _response = self._client_wrapper.httpx_client.request( + "variants/configs/fork", + method="POST", + json={ + "variant_ref": convert_and_respect_annotation_metadata( + object_=variant_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "environment_ref": convert_and_respect_annotation_metadata( + object_=environment_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ConfigResponseModel, + parse_obj_as( + type_=ConfigResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def configs_commit( + self, + *, + config: ConfigDto, + request_options: typing.Optional[RequestOptions] = None, + ) -> ConfigResponseModel: + """ + Parameters + ---------- + config : ConfigDto + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + ConfigResponseModel + Successful Response + + Examples + -------- + from agenta import AgentaApi, ConfigDto + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.configs_commit( + config=ConfigDto( + params={"key": "value"}, + ), + ) + """ + _response = self._client_wrapper.httpx_client.request( + "variants/configs/commit", + method="POST", + json={ + "config": convert_and_respect_annotation_metadata( + object_=config, annotation=ConfigDto, direction="write" + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ConfigResponseModel, + parse_obj_as( + type_=ConfigResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def configs_deploy( + self, + *, + variant_ref: ReferenceRequestModel, + environment_ref: ReferenceRequestModel, + application_ref: typing.Optional[ReferenceRequestModel] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ConfigResponseModel: + """ + Parameters + ---------- + variant_ref : ReferenceRequestModel + + environment_ref : ReferenceRequestModel + + application_ref : typing.Optional[ReferenceRequestModel] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + ConfigResponseModel + Successful Response + + Examples + -------- + from agenta import AgentaApi, ReferenceRequestModel + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.configs_deploy( + variant_ref=ReferenceRequestModel(), + environment_ref=ReferenceRequestModel(), + ) + """ + _response = self._client_wrapper.httpx_client.request( + "variants/configs/deploy", + method="POST", + json={ + "variant_ref": convert_and_respect_annotation_metadata( + object_=variant_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "environment_ref": convert_and_respect_annotation_metadata( + object_=environment_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ConfigResponseModel, + parse_obj_as( + type_=ConfigResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def configs_delete( + self, + *, + variant_ref: ReferenceRequestModel, + application_ref: typing.Optional[ReferenceRequestModel] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> int: + """ + Parameters + ---------- + variant_ref : ReferenceRequestModel + + application_ref : typing.Optional[ReferenceRequestModel] + + request_options : typing.Optional[RequestOptions] + 
Request-specific configuration. + + Returns + ------- + int + Successful Response + + Examples + -------- + from agenta import AgentaApi, ReferenceRequestModel + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.configs_delete( + variant_ref=ReferenceRequestModel(), + ) + """ + _response = self._client_wrapper.httpx_client.request( + "variants/configs/delete", + method="POST", + json={ + "variant_ref": convert_and_respect_annotation_metadata( + object_=variant_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + int, + parse_obj_as( + type_=int, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def configs_list( + self, + *, + application_ref: ReferenceDto, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[ConfigResponseModel]: + """ + Parameters + ---------- + application_ref : ReferenceDto + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[ConfigResponseModel] + Successful Response + + Examples + -------- + from agenta import AgentaApi, ReferenceDto + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.configs_list( + application_ref=ReferenceDto(), + ) + """ + _response = self._client_wrapper.httpx_client.request( + "variants/configs/list", + method="POST", + json={ + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, annotation=ReferenceDto, direction="write" + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[ConfigResponseModel], + parse_obj_as( + type_=typing.List[ConfigResponseModel], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def configs_history( + self, + *, + variant_ref: ReferenceRequestModel, + application_ref: typing.Optional[ReferenceRequestModel] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[ConfigResponseModel]: + """ + Parameters + ---------- + variant_ref : ReferenceRequestModel + + application_ref : typing.Optional[ReferenceRequestModel] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[ConfigResponseModel] + Successful Response + + Examples + -------- + from agenta import AgentaApi, ReferenceRequestModel + + client = AgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + client.variants.configs_history( + variant_ref=ReferenceRequestModel(), + ) + """ + _response = self._client_wrapper.httpx_client.request( + "variants/configs/history", + method="POST", + json={ + "variant_ref": convert_and_respect_annotation_metadata( + object_=variant_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[ConfigResponseModel], + parse_obj_as( + type_=typing.List[ConfigResponseModel], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncVariantsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def add_variant_from_base_and_config( + self, + *, + base_id: str, + new_variant_name: str, + new_config_name: str, + parameters: typing.Dict[str, typing.Optional[typing.Any]], + request_options: typing.Optional[RequestOptions] = None, + ) -> AddVariantFromBaseAndConfigResponse: + """ + Add a new variant based on an existing one. 
+ Same as POST /config + + Args: + payload (AddVariantFromBasePayload): Payload containing base variant ID, new variant name, and parameters. + stoken_session (SessionContainer, optional): Session container. Defaults to result of verify_session(). + + Raises: + HTTPException: Raised if the variant could not be added or accessed. + + Returns: + Union[AppVariantResponse, Any]: New variant details or exception. + + Parameters + ---------- + base_id : str + + new_variant_name : str + + new_config_name : str + + parameters : typing.Dict[str, typing.Optional[typing.Any]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AddVariantFromBaseAndConfigResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.add_variant_from_base_and_config( + base_id="base_id", + new_variant_name="new_variant_name", + new_config_name="new_config_name", + parameters={"key": "value"}, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "variants/from-base", + method="POST", + json={ + "base_id": base_id, + "new_variant_name": new_variant_name, + "new_config_name": new_config_name, + "parameters": parameters, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AddVariantFromBaseAndConfigResponse, + parse_obj_as( + type_=AddVariantFromBaseAndConfigResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise 
ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_variant( + self, + variant_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> AppVariantResponse: + """ + Parameters + ---------- + variant_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AppVariantResponse + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.get_variant( + variant_id="variant_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AppVariantResponse, + parse_obj_as( + type_=AppVariantResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def start_variant( + self, + variant_id: str, + *, + action: VariantAction, + env_vars: typing.Optional[DockerEnvVars] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> Uri: + """ + Start a variant of an app. + + Args: + variant_id (str): The ID of the variant to start. + action (VariantAction): The action to perform on the variant (start). 
+ env_vars (Optional[DockerEnvVars], optional): The environment variables to inject to the Docker container. Defaults to None. + stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()). + + Returns: + URI: The URL of the started variant. + + Raises: + HTTPException: If the app container cannot be started. + + Parameters + ---------- + variant_id : str + + action : VariantAction + + env_vars : typing.Optional[DockerEnvVars] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + Uri + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi, VariantAction + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.start_variant( + variant_id="variant_id", + action=VariantAction( + action="START", + ), + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}", + method="PUT", + json={ + "action": convert_and_respect_annotation_metadata( + object_=action, annotation=VariantAction, direction="write" + ), + "env_vars": convert_and_respect_annotation_metadata( + object_=env_vars, annotation=DockerEnvVars, direction="write" + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + Uri, + parse_obj_as( + type_=Uri, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, 
body=_response_json) + + async def remove_variant( + self, + variant_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Remove a variant from the server. + In the case it's the last variant using the image, stop the container and remove the image. + + Arguments: + app_variant -- AppVariant to remove + + Raises: + HTTPException: If there is a problem removing the app variant + + Parameters + ---------- + variant_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.remove_variant( + variant_id="variant_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_variant_parameters( + self, + variant_id: str, + *, + parameters: typing.Dict[str, typing.Optional[typing.Any]], + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + 
Updates the parameters for an app variant. + + Args: + variant_id (str): The ID of the app variant to update. + payload (UpdateVariantParameterPayload): The payload containing the updated parameters. + stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()). + + Raises: + HTTPException: If there is an error while trying to update the app variant. + + Returns: + JSONResponse: A JSON response containing the updated app variant parameters. + + Parameters + ---------- + variant_id : str + + parameters : typing.Dict[str, typing.Optional[typing.Any]] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.update_variant_parameters( + variant_id="variant_id", + parameters={"key": "value"}, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}/parameters", + method="PUT", + json={ + "parameters": parameters, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def 
update_variant_image( + self, + variant_id: str, + *, + docker_id: str, + tags: str, + type: typing.Optional[str] = OMIT, + organization_id: typing.Optional[str] = OMIT, + workspace_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Updates the image used in an app variant. + + Args: + variant_id (str): The ID of the app variant to update. + image (Image): The image information to update. + + Raises: + HTTPException: If an error occurs while trying to update the app variant. + + Returns: + JSONResponse: A JSON response indicating whether the update was successful or not. + + Parameters + ---------- + variant_id : str + + docker_id : str + + tags : str + + type : typing.Optional[str] + + organization_id : typing.Optional[str] + + workspace_id : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.update_variant_image( + variant_id="variant_id", + docker_id="docker_id", + tags="tags", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}/image", + method="PUT", + json={ + "type": type, + "docker_id": docker_id, + "tags": tags, + "organization_id": organization_id, + "workspace_id": workspace_id, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + 
typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def retrieve_variant_logs( + self, + variant_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + variant_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.retrieve_variant_logs( + variant_id="variant_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}/logs", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_variant_revisions( + self, + variant_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> 
typing.List[AppVariantRevision]: + """ + Parameters + ---------- + variant_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[AppVariantRevision] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.get_variant_revisions( + variant_id="variant_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}/revisions", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[AppVariantRevision], + parse_obj_as( + type_=typing.List[AppVariantRevision], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_variant_revision( + self, + variant_id: str, + revision_number: int, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> AppVariantRevision: + """ + Parameters + ---------- + variant_id : str + + revision_number : int + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AppVariantRevision + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.get_variant_revision( + variant_id="variant_id", + revision_number=1, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"variants/{jsonable_encoder(variant_id)}/revisions/{jsonable_encoder(revision_number)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AppVariantRevision, + parse_obj_as( + type_=AppVariantRevision, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def configs_add( + self, + *, + variant_ref: ReferenceRequestModel, + application_ref: ReferenceRequestModel, + request_options: typing.Optional[RequestOptions] = None, + ) -> ConfigResponseModel: + """ + Parameters + ---------- + variant_ref : ReferenceRequestModel + + application_ref : ReferenceRequestModel + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + ConfigResponseModel + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi, ReferenceRequestModel + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.configs_add( + variant_ref=ReferenceRequestModel(), + application_ref=ReferenceRequestModel(), + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "variants/configs/add", + method="POST", + json={ + "variant_ref": convert_and_respect_annotation_metadata( + object_=variant_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ConfigResponseModel, + parse_obj_as( + type_=ConfigResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def configs_fetch( + self, + *, + variant_ref: typing.Optional[ReferenceRequestModel] = OMIT, + environment_ref: typing.Optional[ReferenceRequestModel] = OMIT, + application_ref: typing.Optional[ReferenceRequestModel] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ConfigResponseModel: + """ + Parameters + ---------- + variant_ref : typing.Optional[ReferenceRequestModel] + + environment_ref : 
typing.Optional[ReferenceRequestModel] + + application_ref : typing.Optional[ReferenceRequestModel] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ConfigResponseModel + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.configs_fetch() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "variants/configs/fetch", + method="POST", + json={ + "variant_ref": convert_and_respect_annotation_metadata( + object_=variant_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "environment_ref": convert_and_respect_annotation_metadata( + object_=environment_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ConfigResponseModel, + parse_obj_as( + type_=ConfigResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def configs_fork( + self, + *, + variant_ref: typing.Optional[ReferenceRequestModel] = OMIT, + environment_ref: typing.Optional[ReferenceRequestModel] = OMIT, + application_ref: typing.Optional[ReferenceRequestModel] = 
OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ConfigResponseModel: + """ + Parameters + ---------- + variant_ref : typing.Optional[ReferenceRequestModel] + + environment_ref : typing.Optional[ReferenceRequestModel] + + application_ref : typing.Optional[ReferenceRequestModel] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ConfigResponseModel + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.configs_fork() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "variants/configs/fork", + method="POST", + json={ + "variant_ref": convert_and_respect_annotation_metadata( + object_=variant_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "environment_ref": convert_and_respect_annotation_metadata( + object_=environment_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ConfigResponseModel, + parse_obj_as( + type_=ConfigResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def configs_commit( + 
self, + *, + config: ConfigDto, + request_options: typing.Optional[RequestOptions] = None, + ) -> ConfigResponseModel: + """ + Parameters + ---------- + config : ConfigDto + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ConfigResponseModel + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi, ConfigDto + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.configs_commit( + config=ConfigDto( + params={"key": "value"}, + ), + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "variants/configs/commit", + method="POST", + json={ + "config": convert_and_respect_annotation_metadata( + object_=config, annotation=ConfigDto, direction="write" + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ConfigResponseModel, + parse_obj_as( + type_=ConfigResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def configs_deploy( + self, + *, + variant_ref: ReferenceRequestModel, + environment_ref: ReferenceRequestModel, + application_ref: typing.Optional[ReferenceRequestModel] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ConfigResponseModel: + """ + Parameters + ---------- + variant_ref : ReferenceRequestModel + + environment_ref : ReferenceRequestModel + + application_ref : 
typing.Optional[ReferenceRequestModel] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ConfigResponseModel + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi, ReferenceRequestModel + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.configs_deploy( + variant_ref=ReferenceRequestModel(), + environment_ref=ReferenceRequestModel(), + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "variants/configs/deploy", + method="POST", + json={ + "variant_ref": convert_and_respect_annotation_metadata( + object_=variant_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "environment_ref": convert_and_respect_annotation_metadata( + object_=environment_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ConfigResponseModel, + parse_obj_as( + type_=ConfigResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def configs_delete( + self, + *, + variant_ref: ReferenceRequestModel, + application_ref: typing.Optional[ReferenceRequestModel] = OMIT, + request_options: 
typing.Optional[RequestOptions] = None, + ) -> int: + """ + Parameters + ---------- + variant_ref : ReferenceRequestModel + + application_ref : typing.Optional[ReferenceRequestModel] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + int + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi, ReferenceRequestModel + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.configs_delete( + variant_ref=ReferenceRequestModel(), + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "variants/configs/delete", + method="POST", + json={ + "variant_ref": convert_and_respect_annotation_metadata( + object_=variant_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + int, + parse_obj_as( + type_=int, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def configs_list( + self, + *, + application_ref: ReferenceDto, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[ConfigResponseModel]: + """ + Parameters + ---------- + application_ref : ReferenceDto + + request_options : 
typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[ConfigResponseModel] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi, ReferenceDto + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.configs_list( + application_ref=ReferenceDto(), + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "variants/configs/list", + method="POST", + json={ + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, annotation=ReferenceDto, direction="write" + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[ConfigResponseModel], + parse_obj_as( + type_=typing.List[ConfigResponseModel], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def configs_history( + self, + *, + variant_ref: ReferenceRequestModel, + application_ref: typing.Optional[ReferenceRequestModel] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[ConfigResponseModel]: + """ + Parameters + ---------- + variant_ref : ReferenceRequestModel + + application_ref : typing.Optional[ReferenceRequestModel] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[ConfigResponseModel] + Successful Response + + Examples + -------- + import asyncio + + from agenta import AsyncAgentaApi, ReferenceRequestModel + + client = AsyncAgentaApi( + api_key="YOUR_API_KEY", + base_url="https://yourhost.com/path/to/api", + ) + + + async def main() -> None: + await client.variants.configs_history( + variant_ref=ReferenceRequestModel(), + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "variants/configs/history", + method="POST", + json={ + "variant_ref": convert_and_respect_annotation_metadata( + object_=variant_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + "application_ref": convert_and_respect_annotation_metadata( + object_=application_ref, + annotation=ReferenceRequestModel, + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[ConfigResponseModel], + parse_obj_as( + type_=typing.List[ConfigResponseModel], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/services/completion-stateless-sdk/agenta/client/backend/variants/types/__init__.py b/services/completion-stateless-sdk/agenta/client/backend/variants/types/__init__.py new file mode 100644 index 0000000000..fac6b42753 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/variants/types/__init__.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .add_variant_from_base_and_config_response import ( + AddVariantFromBaseAndConfigResponse, +) + +__all__ = ["AddVariantFromBaseAndConfigResponse"] diff --git a/services/completion-stateless-sdk/agenta/client/backend/variants/types/add_variant_from_base_and_config_response.py b/services/completion-stateless-sdk/agenta/client/backend/variants/types/add_variant_from_base_and_config_response.py new file mode 100644 index 0000000000..0b9252e08b --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/backend/variants/types/add_variant_from_base_and_config_response.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...types.app_variant_response import AppVariantResponse + +AddVariantFromBaseAndConfigResponse = typing.Union[ + AppVariantResponse, typing.Optional[typing.Any] +] diff --git a/services/completion-stateless-sdk/agenta/client/client.py b/services/completion-stateless-sdk/agenta/client/client.py new file mode 100644 index 0000000000..17dc1ac460 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/client.py @@ -0,0 +1,563 @@ +from typing import Dict, Any, Optional +import os +import time +import click +from pathlib import Path +from typing import List, Optional, Dict, Any + +import requests +from agenta.client.api_models import AppVariant, Image, VariantConfigPayload +from docker.models.images import Image as DockerImage +from requests.exceptions import RequestException + +BACKEND_URL_SUFFIX = os.environ.get("BACKEND_URL_SUFFIX", "api") + + +class APIRequestError(Exception): + """Exception to be raised when an API request fails.""" + + +def get_base_by_app_id_and_name( + app_id: str, base_name: str, host: str, api_key: str = None +) -> str: + """ + Get the base ID for a given app ID and base name. + + Args: + app_id (str): The ID of the app. + base_name (str): The name of the base. + host (str): The URL of the server. 
def get_app_by_name(app_name: str, host: str, api_key: str = None) -> str:
    """Get app by its name on the server.

    Args:
        app_name (str): Name of the app
        host (str): Hostname of the server
        api_key (str, optional): The API key to use for the request. Defaults to None.

    Returns:
        str: The ID of the app.

    Raises:
        APIRequestError: If the request fails or no app with that name exists on the server.
    """

    response = requests.get(
        f"{host}/{BACKEND_URL_SUFFIX}/apps/?app_name={app_name}",
        headers={"Authorization": api_key} if api_key is not None else None,
        timeout=600,
    )
    if response.status_code != 200:
        # The error body may not be JSON (e.g. an HTML gateway error page);
        # fall back to the raw text so the real failure is not masked by a
        # secondary JSON decode error.
        try:
            error_message = response.json()
        except ValueError:
            error_message = response.text
        raise APIRequestError(
            f"Request to get app failed with status code {response.status_code} and error message: {error_message}."
        )
    apps = response.json()  # parse the body once instead of three times
    if not apps:
        raise APIRequestError(f"App with name {app_name} does not exist on the server.")
    return apps[0]["app_id"]  # only one app should exist for that name
+ """ + + response = requests.post( + f"{host}/{BACKEND_URL_SUFFIX}/apps/", + json={"app_name": app_name}, + headers={"Authorization": api_key} if api_key is not None else None, + timeout=600, + ) + if response.status_code != 200: + error_message = response.json() + raise APIRequestError( + f"Request to create new app failed with status code {response.status_code} and error message: {error_message}." + ) + return response.json()["app_id"] + + +def add_variant_to_server( + app_id: str, + base_name: str, + image: Image, + host: str, + api_key: str = None, + retries=10, + backoff_factor=1, +) -> Dict: + """ + Adds a variant to the server with a retry mechanism and a single-line loading state. + + Args: + app_id (str): The ID of the app to add the variant to. + base_name (str): The base name for the variant. + image (Image): The image to use for the variant. + host (str): The host URL of the server. + api_key (str): The API key to use for the request. + retries (int): Number of times to retry the request. + backoff_factor (float): Factor to determine the delay between retries (exponential backoff). + + Returns: + dict: The JSON response from the server. + + Raises: + APIRequestError: If the request to the server fails after retrying. 
+ """ + variant_name = f"{base_name.lower()}.default" + payload = { + "variant_name": variant_name, + "base_name": base_name.lower(), + "config_name": "default", + "docker_id": image.docker_id, + "tags": image.tags, + } + + click.echo( + click.style("Waiting for the variant to be ready", fg="yellow"), nl=False + ) + + for attempt in range(retries): + try: + response = requests.post( + f"{host}/{BACKEND_URL_SUFFIX}/apps/{app_id}/variant/from-image/", + json=payload, + headers={"Authorization": api_key} if api_key is not None else None, + timeout=600, + ) + response.raise_for_status() + click.echo(click.style("\nVariant added successfully.", fg="green")) + return response.json() + except RequestException as e: + if attempt < retries - 1: + click.echo(click.style(".", fg="yellow"), nl=False) + time.sleep(backoff_factor * (2**attempt)) + else: + raise APIRequestError( + click.style( + f"\nRequest to app_variant endpoint failed with status code {response.status_code} and error message: {e}.", + fg="red", + ) + ) + except Exception as e: + raise APIRequestError( + click.style(f"\nAn unexpected error occurred: {e}", fg="red") + ) + + +def start_variant( + variant_id: str, + host: str, + env_vars: Optional[Dict[str, str]] = None, + api_key: str = None, +) -> str: + """ + Starts or stops a container with the given variant and exposes its endpoint. + + Args: + variant_id (str): The ID of the variant. + host (str): The host URL. + env_vars (Optional[Dict[str, str]]): Optional environment variables to inject into the container. + api_key (str): The API key to use for the request. + + Returns: + str: The endpoint of the container. + + Raises: + APIRequestError: If the API request fails. 
+ """ + payload = {} + payload["action"] = {"action": "START"} + if env_vars: + payload["env_vars"] = env_vars + try: + response = requests.put( + f"{host}/{BACKEND_URL_SUFFIX}/variants/{variant_id}/", + json=payload, + headers={"Authorization": api_key} if api_key is not None else None, + timeout=600, + ) + if response.status_code == 404: + raise APIRequestError( + f"404: Variant with ID {variant_id} does not exist on the server." + ) + elif response.status_code != 200: + error_message = response.text + raise APIRequestError( + f"Request to start variant endpoint failed with status code {response.status_code} and error message: {error_message}." + ) + return response.json().get("uri", "") + + except RequestException as e: + raise APIRequestError(f"An error occurred while making the request: {e}") + + +def list_variants(app_id: str, host: str, api_key: str = None) -> List[AppVariant]: + """ + Returns a list of AppVariant objects for a given app_id and host. + + Args: + app_id (str): The ID of the app to retrieve variants for. + host (str): The URL of the host to make the request to. + api_key (str): The API key to use for the request. + + Returns: + List[AppVariant]: A list of AppVariant objects for the given app_id and host. + """ + response = requests.get( + f"{host}/{BACKEND_URL_SUFFIX}/apps/{app_id}/variants/", + headers={"Authorization": api_key} if api_key is not None else None, + timeout=600, + ) + # Check for successful request + if response.status_code == 403: + raise APIRequestError( + f"No app by id {app_id} exists or you do not have access to it." + ) + elif response.status_code == 404: + raise APIRequestError( + f"No app by id {app_id} exists or you do not have access to it." + ) + elif response.status_code != 200: + error_message = response.json() + raise APIRequestError( + f"Request to apps endpoint failed with status code {response.status_code} and error message: {error_message}." 
+ ) + + app_variants = response.json() + return [AppVariant(**variant) for variant in app_variants] + + +def remove_variant(variant_id: str, host: str, api_key: str = None): + """ + Sends a DELETE request to the Agenta backend to remove a variant with the given ID. + + Args: + variant_id (str): The ID of the variant to be removed. + host (str): The URL of the Agenta backend. + api_key (str): The API key to use for the request. + + Raises: + APIRequestError: If the request to the remove_variant endpoint fails. + + Returns: + None + """ + response = requests.delete( + f"{host}/{BACKEND_URL_SUFFIX}/variants/{variant_id}", + headers={ + "Content-Type": "application/json", + "Authorization": api_key if api_key is not None else None, + }, + timeout=600, + ) + + # Check for successful request + if response.status_code != 200: + error_message = response.json() + raise APIRequestError( + f"Request to remove_variant endpoint failed with status code {response.status_code} and error message: {error_message}" + ) + + +def update_variant_image(variant_id: str, image: Image, host: str, api_key: str = None): + """ + Update the image of a variant with the given ID. + + Args: + variant_id (str): The ID of the variant to update. + image (Image): The new image to set for the variant. + host (str): The URL of the host to send the request to. + api_key (str): The API key to use for the request. + + Raises: + APIRequestError: If the request to update the variant fails. + + Returns: + None + """ + response = requests.put( + f"{host}/{BACKEND_URL_SUFFIX}/variants/{variant_id}/image/", + json=image.dict(), + headers={"Authorization": api_key} if api_key is not None else None, + timeout=600, + ) + if response.status_code != 200: + error_message = response.json() + raise APIRequestError( + f"Request to update app_variant failed with status code {response.status_code} and error message: {error_message}." 
+ ) + + +def send_docker_tar( + app_id: str, base_name: str, tar_path: Path, host: str, api_key: str = None +) -> Image: + """ + Sends a Docker tar file to the specified host to build an image for the given app ID and variant name. + + Args: + app_id (str): The ID of the app. + base_name (str): The name of the codebase. + tar_path (Path): The path to the Docker tar file. + host (str): The URL of the host to send the request to. + api_key (str): The API key to use for the request. + + Returns: + Image: The built Docker image. + + Raises: + Exception: If the response status code is 500, indicating that serving the variant failed. + """ + with tar_path.open("rb") as tar_file: + response = requests.post( + f"{host}/{BACKEND_URL_SUFFIX}/containers/build_image/?app_id={app_id}&base_name={base_name}", + files={ + "tar_file": tar_file, + }, + headers={"Authorization": api_key} if api_key is not None else None, + timeout=1200, + ) + + if response.status_code == 500: + response_error = response.json() + error_msg = "Serving the variant failed.\n" + error_msg += f"Log: {response_error}\n" + error_msg += "Here's how you may be able to solve the issue:\n" + error_msg += "- First, make sure that the requirements.txt file has all the dependencies that you need.\n" + error_msg += "- Second, check the Docker logs for the backend image to see the error when running the Docker container." + raise Exception(error_msg) + + response.raise_for_status() + image = Image.parse_obj(response.json()) + return image + + +def save_variant_config( + base_id: str, + config_name: str, + parameters: Dict[str, Any], + overwrite: bool, + host: str, + api_key: Optional[str] = None, +) -> None: + """ + Saves a variant configuration to the Agenta backend. + If the config already exists, it will be overwritten if the overwrite argument is set to True. + If the config does not exist, a new variant will be created. + + Args: + base_id (str): The ID of the base configuration. 
+ config_name (str): The name of the variant configuration. + parameters (Dict[str, Any]): The parameters of the variant configuration. + overwrite (bool): Whether to overwrite an existing variant configuration with the same name. + host (str): The URL of the Agenta backend. + api_key (Optional[str], optional): The API key to use for authentication. Defaults to None. + + Raises: + ValueError: If the 'host' argument is not specified. + APIRequestError: If the request to the Agenta backend fails. + + Returns: + None + """ + if host is None: + raise ValueError("The 'host' is not specified in save_variant_config") + + variant_config = VariantConfigPayload( + base_id=base_id, + config_name=config_name, + parameters=parameters, + overwrite=overwrite, + ) + try: + response = requests.post( + f"{host}/{BACKEND_URL_SUFFIX}/configs/", + json=variant_config.dict(), + headers={"Authorization": api_key} if api_key is not None else None, + timeout=600, + ) + request = f"POST {host}/{BACKEND_URL_SUFFIX}/configs/ {variant_config.dict()}" + # Check for successful request + if response.status_code != 200: + error_message = response.json().get("detail", "Unknown error") + raise APIRequestError( + f"Request {request} to save_variant_config endpoint failed with status code {response.status_code}. Error message: {error_message}" + ) + except RequestException as e: + raise APIRequestError(f"Request failed: {str(e)}") + + +def fetch_variant_config( + base_id: str, + host: str, + config_name: Optional[str] = None, + environment_name: Optional[str] = None, + api_key: Optional[str] = None, +) -> Dict[str, Any]: + """ + Fetch a variant configuration from the server. + + Args: + base_id (str): ID of the base configuration. + config_name (str): Configuration name. + environment_name (str): Name of the environment. + host (str): The server host URL. + api_key (Optional[str], optional): The API key to use for authentication. Defaults to None. 
+ + Raises: + APIRequestError: If the API request fails. + + Returns: + dict: The requested variant configuration. + """ + + if host is None: + raise ValueError("The 'host' is not specified in fetch_variant_config") + + try: + if environment_name: + endpoint_params = f"?base_id={base_id}&environment_name={environment_name}" + elif config_name: + endpoint_params = f"?base_id={base_id}&config_name={config_name}" + else: + raise ValueError( + "Either 'config_name' or 'environment_name' must be specified in fetch_variant_config" + ) + response = requests.get( + f"{host}/{BACKEND_URL_SUFFIX}/configs/{endpoint_params}", + headers={"Authorization": api_key} if api_key is not None else None, + timeout=600, + ) + + request = f"GET {host}/{BACKEND_URL_SUFFIX}/configs/ {base_id} {config_name} {environment_name}" + + # Check for successful request + if response.status_code != 200: + error_message = response.json().get("detail", "Unknown error") + raise APIRequestError( + f"Request {request} to fetch_variant_config endpoint failed with status code {response.status_code}. Error message: {error_message}" + ) + + return response.json() + + except RequestException as e: + raise APIRequestError(f"Request failed: {str(e)}") + + +def validate_api_key(api_key: str, host: str) -> bool: + """ + Validates an API key with the Agenta backend. + + Args: + api_key (str): The API key to validate. + host (str): The URL of the Agenta backend. + + Returns: + bool: Whether the API key is valid or not. + """ + try: + headers = {"Authorization": api_key} + + prefix = api_key.split(".")[0] + + response = requests.get( + f"{host}/{BACKEND_URL_SUFFIX}/keys/{prefix}/validate/", + headers=headers, + timeout=600, + ) + if response.status_code != 200: + error_message = response.json() + raise APIRequestError( + f"Request to validate api key failed with status code {response.status_code} and error message: {error_message}." 
+ ) + return True + except RequestException as e: + raise APIRequestError(f"An error occurred while making the request: {e}") + + +def retrieve_user_id(host: str, api_key: Optional[str] = None) -> str: + """Retrieve user ID from the server. + + Args: + host (str): The URL of the Agenta backend + api_key (str): The API key to validate with. + + Returns: + str: the user ID + """ + + try: + response = requests.get( + f"{host}/{BACKEND_URL_SUFFIX}/profile/", + headers={"Authorization": api_key} if api_key is not None else None, + timeout=600, + ) + if response.status_code != 200: + error_message = response.json().get("detail", "Unknown error") + raise APIRequestError( + f"Request to fetch_user_profile endpoint failed with status code {response.status_code}. Error message: {error_message}" + ) + return response.json()["id"] + except RequestException as e: + raise APIRequestError(f"Request failed: {str(e)}") + + +from pydantic import BaseModel + + +# def run_evaluation(app_name: str, host: str, api_key: str = None) -> str: +def run_evaluation(app_name: str, host: str, api_key: str = None) -> str: + """Runs an evaluation on the server. + Args: + app_name (str): Name of the app + host (str): Hostname of the server + api_key (str): The API key to use for the request. + """ + + new_evaluation = { + "app_id": "6583e552eb855930ec6b1bdd", + "variant_ids": [ + "6583e552eb855930ec6b1be3", + # "6570aed55d0eaff2293088e6" + ], + "evaluators_configs": ["65856b2b11d53fcce5894ab6"], + "testset_id": "6583e552eb855930ec6b1be4", + } + + response = requests.post( + f"{host}/api/evaluations/", + json=new_evaluation, + # headers={"Authorization": api_key} if api_key is not None else None, + timeout=600, + ) + if response.status_code != 200: + error_message = response.json() + raise APIRequestError( + f"Request to run evaluations failed with status code {response.status_code} and error message: {error_message}." 
+ ) + + return response.json() diff --git a/services/completion-stateless-sdk/agenta/client/exceptions.py b/services/completion-stateless-sdk/agenta/client/exceptions.py new file mode 100644 index 0000000000..ac6fafe529 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/client/exceptions.py @@ -0,0 +1,2 @@ +class APIRequestError(Exception): + """Exception to be raised when an API request fails.""" diff --git a/services/completion-stateless-sdk/agenta/config.py b/services/completion-stateless-sdk/agenta/config.py new file mode 100644 index 0000000000..4034d7cd86 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/config.py @@ -0,0 +1,25 @@ +try: + from pydantic.v1 import BaseSettings # type: ignore +except ImportError: + from pydantic import BaseSettings # type: ignore + +import os +import toml +from pathlib import Path + +# Load the settings from the .toml file +toml_config = toml.load(f"{Path(__file__).parent}/config.toml") + +# Set the environment variables from the TOML configurations +os.environ["REGISTRY"] = toml_config["registry"] +os.environ["BACKEND_URL_SUFFIX"] = toml_config["backend_url_suffix"] +os.environ["ALLOW_ORIGINS"] = toml_config["allow_origins"] + + +class Settings(BaseSettings): + registry: str + backend_url_suffix: str + allow_origins: str + + +settings = Settings() diff --git a/services/completion-stateless-sdk/agenta/config.toml b/services/completion-stateless-sdk/agenta/config.toml new file mode 100644 index 0000000000..a29287ccea --- /dev/null +++ b/services/completion-stateless-sdk/agenta/config.toml @@ -0,0 +1,4 @@ +docker_registry_url="127.0.0.1:5001" +registry="agenta-server" +backend_url_suffix="api" +allow_origins="http://localhost:3000,http://localhost:3001,http://cloud.agenta.ai,https://cloud.agenta.ai" \ No newline at end of file diff --git a/services/completion-stateless-sdk/agenta/docker/docker-assets/Dockerfile.cloud.template 
b/services/completion-stateless-sdk/agenta/docker/docker-assets/Dockerfile.cloud.template new file mode 100644 index 0000000000..633521b95c --- /dev/null +++ b/services/completion-stateless-sdk/agenta/docker/docker-assets/Dockerfile.cloud.template @@ -0,0 +1,9 @@ +FROM public.ecr.aws/h3w6n5z0/agentaai/lambda_templates_public:main + +COPY requirements.txt ${LAMBDA_TASK_ROOT} +RUN pip install --no-cache-dir --disable-pip-version-check -U agenta +RUN pip install --no-cache-dir --disable-pip-version-check -r requirements.txt +RUN pip install --no-cache-dir --disable-pip-version-check mangum +COPY . ${LAMBDA_TASK_ROOT} + +CMD [ "lambda_function.handler" ] diff --git a/services/completion-stateless-sdk/agenta/docker/docker-assets/Dockerfile.template b/services/completion-stateless-sdk/agenta/docker/docker-assets/Dockerfile.template new file mode 100644 index 0000000000..9eb6b06a54 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/docker/docker-assets/Dockerfile.template @@ -0,0 +1,13 @@ +FROM agentaai/templates_v2:main + +WORKDIR /app + +COPY . . + +RUN pip install --no-cache-dir --disable-pip-version-check -U agenta +RUN pip install --no-cache-dir --disable-pip-version-check -r requirements.txt + +EXPOSE 80 + +RUN ["chmod", "+x", "./entrypoint.sh"] +CMD ["./entrypoint.sh"] diff --git a/services/completion-stateless-sdk/agenta/docker/docker-assets/README.md b/services/completion-stateless-sdk/agenta/docker/docker-assets/README.md new file mode 100644 index 0000000000..c448906cba --- /dev/null +++ b/services/completion-stateless-sdk/agenta/docker/docker-assets/README.md @@ -0,0 +1 @@ +The code here is just used when creating the template to dockerize the app. It is not part of the cli. 
\ No newline at end of file diff --git a/services/completion-stateless-sdk/agenta/docker/docker-assets/entrypoint.sh b/services/completion-stateless-sdk/agenta/docker/docker-assets/entrypoint.sh new file mode 100755 index 0000000000..3c6b353144 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/docker/docker-assets/entrypoint.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env +fi + +exec python main.py diff --git a/services/completion-stateless-sdk/agenta/docker/docker-assets/lambda_function.py b/services/completion-stateless-sdk/agenta/docker/docker-assets/lambda_function.py new file mode 100644 index 0000000000..ca186d6e82 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/docker/docker-assets/lambda_function.py @@ -0,0 +1,6 @@ +import agenta +import _app +from mangum import Mangum + + +handler = Mangum(agenta.app) diff --git a/services/completion-stateless-sdk/agenta/docker/docker-assets/main.py b/services/completion-stateless-sdk/agenta/docker/docker-assets/main.py new file mode 100644 index 0000000000..df78f0d322 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/docker/docker-assets/main.py @@ -0,0 +1,13 @@ +from uvicorn import run +import agenta +import _app # This will register the routes with the FastAPI application +import os + +try: + import ingest +except ImportError: + pass + + +if __name__ == "__main__": + run("agenta:app", host="0.0.0.0", port=80) diff --git a/services/completion-stateless-sdk/agenta/docker/docker_utils.py b/services/completion-stateless-sdk/agenta/docker/docker_utils.py new file mode 100644 index 0000000000..99bfef038c --- /dev/null +++ b/services/completion-stateless-sdk/agenta/docker/docker_utils.py @@ -0,0 +1,100 @@ +import logging +import shutil +import tarfile +import tempfile +from pathlib import Path +import os + + +logger = logging.getLogger(__name__) +logger.setLevel(logging.DEBUG) + +DEBUG = os.environ.get("AGENTA_CLI_DEBUG", False) + + +def create_dockerfile(out_folder: 
Path) -> Path: + """Creates a dockerfile based on the template in the out_folder. + + Arguments: + out_folder -- Folder in which to create the Dockerfile. + """ + assert Path(out_folder).exists(), f"Folder {out_folder} does not exist." + dockerfile_template = ( + Path(__file__).parent / "docker-assets" / "Dockerfile.template" + ) + dockerfile_path = out_folder / "Dockerfile" + shutil.copy(dockerfile_template, dockerfile_path) + dockerfile_template = ( + Path(__file__).parent / "docker-assets" / "Dockerfile.cloud.template" + ) + dockerfile_path = out_folder / "Dockerfile.cloud" + shutil.copy(dockerfile_template, dockerfile_path) + + return dockerfile_path + + +def build_tar_docker_container(folder: Path, file_name: Path) -> Path: + """Builds the tar file containing the files needed for the docker container + + Arguments: + folder -- the path containing the code for the app + file_name -- the file containing the main code of the app + Returns: + the path to the created tar file + """ + tarfile_path = folder / "docker.tar.gz" # output file + if tarfile_path.exists(): + tarfile_path.unlink() + + create_dockerfile(folder) + shutil.copytree(Path(__file__).parent.parent, folder / "agenta", dirs_exist_ok=True) + shutil.copy(Path(__file__).parent / "docker-assets" / "main.py", folder) + shutil.copy(Path(__file__).parent / "docker-assets" / "lambda_function.py", folder) + shutil.copy(Path(__file__).parent / "docker-assets" / "entrypoint.sh", folder) + + # Initialize agentaignore_content with an empty string + agentaignore_content = "" + + # Read the contents of the .agentaignore file + agentaignore_file_path = folder / ".agentaignore" + if agentaignore_file_path.exists(): + with open(agentaignore_file_path, "r") as agentaignore_file: + agentaignore_content = agentaignore_file.read() + + # Create a temporary directory + with tempfile.TemporaryDirectory() as temp_dir: + temp_path = Path(temp_dir) + + # Clean - remove '/' from every file and folder in the .agentaignore contents + 
sanitized_patterns = [ + pattern.replace("/", "") for pattern in agentaignore_content.splitlines() + ] + + # Function to ignore files based on the patterns + def ignore_patterns(path, names): + return set(sanitized_patterns) + + # Use a single copytree call with ignore_patterns + shutil.copytree(folder, temp_path, ignore=ignore_patterns, dirs_exist_ok=True) + + # Rename the specified file to _app.py in the temporary directory + shutil.copy(temp_path / file_name, temp_path / "_app.py") + + # Create the tar.gz file + with tarfile.open(tarfile_path, "w:gz") as tar: + tar.add(temp_path, arcname=folder.name) + if not DEBUG: + # Clean up - remove specified files and folders + for item in ["agenta", "main.py", "lambda_function.py", "entrypoint.sh"]: + path = folder / item + if path.exists(): + if path.is_dir(): + shutil.rmtree(path) + else: + path.unlink() + + for dockerfile in folder.glob("Dockerfile*"): + dockerfile.unlink() + + # dockerfile_path.unlink() + return tarfile_path diff --git a/services/completion-stateless-sdk/agenta/sdk/__init__.py b/services/completion-stateless-sdk/agenta/sdk/__init__.py new file mode 100644 index 0000000000..4fc475ef45 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/__init__.py @@ -0,0 +1,71 @@ +from typing import Optional, Callable, Any + +from .utils.preinit import PreInitObject # always the first import! 
+ +import agenta.client.backend.types as client_types # pylint: disable=wrong-import-order + +from .types import ( + DictInput, + MultipleChoice, + FloatParam, + InFile, + IntParam, + MultipleChoiceParam, + GroupedMultipleChoiceParam, + TextParam, + MessagesInput, + FileInputURL, + BinaryParam, + Prompt, + AgentaNodeDto, + AgentaNodesResponse, +) + +from .tracing import Tracing, get_tracer +from .decorators.tracing import instrument +from .tracing.conventions import Reference +from .decorators.routing import entrypoint, app, route +from .agenta_init import Config, AgentaSingleton, init as _init +from .utils.costs import calculate_token_usage +from .managers.vault import VaultManager +from .managers.config import ConfigManager +from .managers.variant import VariantManager +from .managers.deployment import DeploymentManager + +config = PreInitObject("agenta.config", Config) +DEFAULT_AGENTA_SINGLETON_INSTANCE = AgentaSingleton() + +types = client_types + +api = None +async_api = None + +tracing = DEFAULT_AGENTA_SINGLETON_INSTANCE.tracing # type: ignore +tracer = get_tracer(tracing) + + +def init( + host: Optional[str] = None, + api_key: Optional[str] = None, + config_fname: Optional[str] = None, + redact: Optional[Callable[..., Any]] = None, + redact_on_error: Optional[bool] = True, + # DEPRECATING + app_id: Optional[str] = None, +): + global api, async_api, tracing, tracer # pylint: disable=global-statement + + _init( + host=host, + api_key=api_key, + config_fname=config_fname, + redact=redact, + redact_on_error=redact_on_error, + app_id=app_id, + ) + + api = DEFAULT_AGENTA_SINGLETON_INSTANCE.api # type: ignore + async_api = DEFAULT_AGENTA_SINGLETON_INSTANCE.async_api # type: ignore + + tracing = DEFAULT_AGENTA_SINGLETON_INSTANCE.tracing # type: ignore + tracer = get_tracer(tracing) diff --git a/services/completion-stateless-sdk/agenta/sdk/agenta_init.py b/services/completion-stateless-sdk/agenta/sdk/agenta_init.py new file mode 100644 index 0000000000..06659f4f4d 
--- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/agenta_init.py @@ -0,0 +1,230 @@ +import toml +from os import getenv +from typing import Optional, Callable, Any +from importlib.metadata import version + +from agenta.sdk.utils.logging import log +from agenta.sdk.utils.globals import set_global +from agenta.client.backend.client import AgentaApi, AsyncAgentaApi + +from agenta.sdk.tracing import Tracing +from agenta.sdk.context.routing import routing_context + + +class AgentaSingleton: + """Singleton class to save all the "global variables" for the sdk.""" + + _instance = None + config = None + tracing = None + + api = None + async_api = None + + def __new__(cls): + if not cls._instance: + cls._instance = super(AgentaSingleton, cls).__new__(cls) + return cls._instance + + def init( + self, + *, + host: Optional[str] = None, + api_key: Optional[str] = None, + config_fname: Optional[str] = None, + redact: Optional[Callable[..., Any]] = None, + redact_on_error: Optional[bool] = True, + # DEPRECATING + app_id: Optional[str] = None, + ) -> None: + """ + Main function to initialize the singleton. + + Initializes the singleton with the given `app_id`, `host`, and `api_key`. The order of precedence for these variables is: + 1. Explicit argument provided in the function call. + 2. Value from the configuration file specified by `config_fname`. + 3. Environment variables. + + Examples: + ag.init(app_id="xxxx", api_key="xxx") + ag.init(config_fname="config.toml") + ag.init() #assuming env vars are set + + Args: + app_id (Optional[str]): ID of the Agenta application. Defaults to None. If not provided, will look for "app_id" in the config file, then "AGENTA_APP_ID" in environment variables. + host (Optional[str]): Host name of the backend server. Defaults to None. If not provided, will look for "backend_host" in the config file, then "AGENTA_HOST" in environment variables. + api_key (Optional[str]): API Key to use with the host of the backend server. 
Defaults to None. If not provided, will look for "api_key" in the config file, then "AGENTA_API_KEY" in environment variables. + config_fname (Optional[str]): Path to the configuration file (relative or absolute). Defaults to None. + + Raises: + ValueError: If `app_id` is not specified either as an argument, in the config file, or in the environment variables. + """ + + log.info("Agenta - SDK version: %s", version("agenta")) + + config = {} + if config_fname: + config = toml.load(config_fname) + + self.host = ( + host + or getenv("AGENTA_HOST") + or config.get("backend_host") + or config.get("host") + or "https://cloud.agenta.ai" + ) + + self.app_id = app_id or config.get("app_id") or getenv("AGENTA_APP_ID") + # if not self.app_id: + # raise ValueError( + # "App ID must be specified. You can provide it in one of the following ways:\n" + # "1. As an argument when calling ag.init(app_id='your_app_id').\n" + # "2. In the configuration file specified by config_fname.\n" + # "3. As an environment variable 'AGENTA_APP_ID'." 
+ # ) + + self.api_key = api_key or getenv("AGENTA_API_KEY") or config.get("api_key") + + self.base_id = getenv("AGENTA_BASE_ID") + + self.service_id = getenv("AGENTA_SERVICE_ID") or self.base_id + + log.info("Agenta - Service ID: %s", self.service_id) + log.info("Agenta - Application ID: %s", self.app_id) + + self.tracing = Tracing( + url=f"{self.host}/api/observability/v1/otlp/traces", # type: ignore + redact=redact, + redact_on_error=redact_on_error, + ) + + self.tracing.configure( + api_key=self.api_key, + service_id=self.service_id, + # DEPRECATING + app_id=self.app_id, + ) + + self.api = AgentaApi( + base_url=self.host + "/api", + api_key=self.api_key if self.api_key else "", + ) + + self.async_api = AsyncAgentaApi( + base_url=self.host + "/api", + api_key=self.api_key if self.api_key else "", + ) + + self.config = Config( + host=self.host, + base_id=self.base_id, + api_key=self.api_key, + ) + + +class Config: + def __init__( + self, + # LEGACY + host: Optional[str] = None, + base_id: Optional[str] = None, + api_key: Optional[str] = None, + # LEGACY + **kwargs, + ): + self.default_parameters = {**kwargs} + + def set_default(self, **kwargs): + self.default_parameters.update(kwargs) + + def get_default(self): + return self.default_parameters + + def __getattr__(self, key): + context = routing_context.get() + + parameters = context.parameters + + if not parameters: + return None + + if key in parameters: + value = parameters[key] + + if isinstance(value, dict): + nested_config = Config() + nested_config.set_default(**value) + + return nested_config + + return value + + return None + + ### --- LEGACY --- ### + + def register_default(self, overwrite=False, **kwargs): + """alias for default""" + return self.default(overwrite=overwrite, **kwargs) + + def default(self, overwrite=False, **kwargs): + """Saves the default parameters to the app_name and base_name in case they are not already saved. 
+ Args: + overwrite: Whether to overwrite the existing configuration or not + **kwargs: A dict containing the parameters + """ + self.set(**kwargs) + + def set(self, **kwargs): + self.set_default(**kwargs) + + def all(self): + return self.default_parameters + + +def init( + host: Optional[str] = None, + api_key: Optional[str] = None, + config_fname: Optional[str] = None, + redact: Optional[Callable[..., Any]] = None, + redact_on_error: Optional[bool] = True, + # DEPRECATING + app_id: Optional[str] = None, +): + """Main function to initialize the agenta sdk. + + Initializes agenta with the given `app_id`, `host`, and `api_key`. The order of precedence for these variables is: + 1. Explicit argument provided in the function call. + 2. Value from the configuration file specified by `config_fname`. + 3. Environment variables. + + - `app_id` is a required parameter (to be specified in one of the above ways) + - `host` is optional and defaults to "https://cloud.agenta.ai" + - `api_key` is optional and defaults to "". It is required only when using cloud or enterprise version of agenta. + + + Args: + app_id (Optional[str]): ID of the Agenta application. Defaults to None. If not provided, will look for "app_id" in the config file, then "AGENTA_APP_ID" in environment variables. + host (Optional[str]): Host name of the backend server. Defaults to None. If not provided, will look for "backend_host" in the config file, then "AGENTA_HOST" in environment variables. + api_key (Optional[str]): API Key to use with the host of the backend server. Defaults to None. If not provided, will look for "api_key" in the config file, then "AGENTA_API_KEY" in environment variables. + config_fname (Optional[str]): Path to the configuration file. Defaults to None. + + Raises: + ValueError: If `app_id` is not specified either as an argument, in the config file, or in the environment variables. 
+ """ + + singleton = AgentaSingleton() + + singleton.init( + host=host, + api_key=api_key, + config_fname=config_fname, + redact=redact, + redact_on_error=redact_on_error, + app_id=app_id, + ) + + set_global( + config=singleton.config, + tracing=singleton.tracing, + ) diff --git a/services/completion-stateless-sdk/agenta/sdk/assets.py b/services/completion-stateless-sdk/agenta/sdk/assets.py new file mode 100644 index 0000000000..c62cc9dd97 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/assets.py @@ -0,0 +1,84 @@ +supported_llm_models = { + "Mistral AI": [ + "mistral/mistral-tiny", + "mistral/mistral-small", + "mistral/mistral-medium", + "mistral/mistral-large-latest", + ], + "Open AI": [ + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo", + "gpt-4", + "gpt-4o", + "gpt-4o-mini", + "gpt-4-1106-preview", + ], + "Gemini": ["gemini/gemini-1.5-pro-latest", "gemini/gemini-1.5-flash"], + "Cohere": [ + "cohere/command-light", + "cohere/command-r-plus", + "cohere/command-nightly", + ], + "Anthropic": [ + "anthropic/claude-3-5-sonnet-20240620", + "anthropic/claude-3-opus-20240229", + "anthropic/claude-3-sonnet-20240229", + "anthropic/claude-3-haiku-20240307", + "anthropic/claude-2.1", + "anthropic/claude-2", + "anthropic/claude-instant-1.2", + "anthropic/claude-instant-1", + ], + "Anyscale": [ + "anyscale/meta-llama/Llama-2-13b-chat-hf", + "anyscale/meta-llama/Llama-2-70b-chat-hf", + ], + "Perplexity AI": [ + "perplexity/pplx-7b-chat", + "perplexity/pplx-70b-chat", + "perplexity/pplx-7b-online", + "perplexity/pplx-70b-online", + ], + "DeepInfra": [ + "deepinfra/meta-llama/Llama-2-70b-chat-hf", + "deepinfra/meta-llama/Llama-2-13b-chat-hf", + "deepinfra/codellama/CodeLlama-34b-Instruct-hf", + "deepinfra/mistralai/Mistral-7B-Instruct-v0.1", + "deepinfra/jondurbin/airoboros-l2-70b-gpt4-1.4.1", + ], + "Together AI": [ + "together_ai/togethercomputer/llama-2-70b-chat", + "together_ai/togethercomputer/llama-2-70b", + "together_ai/togethercomputer/LLaMA-2-7B-32K", + 
"together_ai/togethercomputer/Llama-2-7B-32K-Instruct", + "together_ai/togethercomputer/llama-2-7b", + "together_ai/togethercomputer/alpaca-7b", + "together_ai/togethercomputer/CodeLlama-34b-Instruct", + "together_ai/togethercomputer/CodeLlama-34b-Python", + "together_ai/WizardLM/WizardCoder-Python-34B-V1.0", + "together_ai/NousResearch/Nous-Hermes-Llama2-13b", + "together_ai/Austism/chronos-hermes-13b", + ], + "Aleph Alpha": [ + "luminous-base", + "luminous-base-control", + "luminous-extended-control", + "luminous-supreme", + ], + "OpenRouter": [ + "openrouter/openai/gpt-3.5-turbo", + "openrouter/openai/gpt-3.5-turbo-16k", + "openrouter/anthropic/claude-instant-v1", + "openrouter/google/palm-2-chat-bison", + "openrouter/google/palm-2-codechat-bison", + "openrouter/meta-llama/llama-2-13b-chat", + "openrouter/meta-llama/llama-2-70b-chat", + ], + "Groq": [ + "groq/llama3-8b-8192", + "groq/llama3-70b-8192", + "groq/llama2-70b-4096", + "groq/mixtral-8x7b-32768", + "groq/gemma-7b-it", + ], +} diff --git a/services/completion-stateless-sdk/agenta/sdk/client.py b/services/completion-stateless-sdk/agenta/sdk/client.py new file mode 100644 index 0000000000..ee94ced567 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/client.py @@ -0,0 +1,56 @@ +import os + +from cachetools import TTLCache, cached + +from agenta.client.backend.client import AgentaApi + + +class Agenta: + """Client class for interacting with the Agenta API.""" + + def __init__(self, api_key: str = None, host: str = None): + """ + Initializes the Agenta client with API key and host. + + Raises: + EnvironmentError: If AGENTA_API_KEY is not set. + """ + if not api_key and not os.environ.get("AGENTA_API_KEY"): + raise EnvironmentError( + "Required environment variables AGENTA_API_KEY is not set." 
+ ) + self.api_key = api_key if api_key else os.environ.get("AGENTA_API_KEY") + self.host = ( + host if host else os.environ.get("AGENTA_HOST", "https://cloud.agenta.ai") + ) + self.cache = TTLCache(maxsize=1024, ttl=300) + backend_url = f"{self.host}/api" + self.client = AgentaApi(base_url=backend_url, api_key=self.api_key) + + def get_config(self, base_id: str, environment: str, cache_timeout: int = 300): + """ + Fetches and caches the configuration for a specified base ID and environment. + + Args: + base_id (str): The unique identifier for the base. + environment (str): The environment name (e.g., 'production', 'development'). + cache_timeout (int): The TTL for the cache in seconds. Defaults to 300 seconds. + + Returns: + dict: The configuration data retrieved from the Agenta API. + + Raises: + EnvironmentError: If the required AGENTA_API_KEY is not set in the environment variables. + """ + if cache_timeout != self.cache.ttl: + self.cache = TTLCache( + maxsize=1024, ttl=cache_timeout + ) # TODO: We need to modify this to use a dynamic TTLCache implementation in the future + + @cached(cache=self.cache) + def fetch_config(base_id: str, environment: str = "production"): + return self.client.configs.get_config( + base_id=base_id, environment_name=environment + ) + + return fetch_config(base_id, environment) diff --git a/services/completion-stateless-sdk/agenta/sdk/context/__init__.py b/services/completion-stateless-sdk/agenta/sdk/context/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/services/completion-stateless-sdk/agenta/sdk/context/exporting.py b/services/completion-stateless-sdk/agenta/sdk/context/exporting.py new file mode 100644 index 0000000000..2fe03a09cd --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/context/exporting.py @@ -0,0 +1,25 @@ +from typing import Optional + +from contextlib import contextmanager +from contextvars import ContextVar + +from pydantic import BaseModel + + +class 
ExportingContext(BaseModel): + credentials: Optional[str] = None + + +exporting_context = ContextVar("exporting_context", default=ExportingContext()) + + +@contextmanager +def exporting_context_manager( + *, + context: Optional[ExportingContext] = None, +): + token = exporting_context.set(context) + try: + yield + finally: + exporting_context.reset(token) diff --git a/services/completion-stateless-sdk/agenta/sdk/context/routing.py b/services/completion-stateless-sdk/agenta/sdk/context/routing.py new file mode 100644 index 0000000000..1284898289 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/context/routing.py @@ -0,0 +1,26 @@ +from typing import Any, Dict, List, Optional + +from contextlib import contextmanager +from contextvars import ContextVar + +from pydantic import BaseModel + + +class RoutingContext(BaseModel): + parameters: Optional[Dict[str, Any]] = None + secrets: Optional[List[Any]] = None + + +routing_context = ContextVar("routing_context", default=RoutingContext()) + + +@contextmanager +def routing_context_manager( + *, + context: Optional[RoutingContext] = None, +): + token = routing_context.set(context) + try: + yield + finally: + routing_context.reset(token) diff --git a/services/completion-stateless-sdk/agenta/sdk/context/tracing.py b/services/completion-stateless-sdk/agenta/sdk/context/tracing.py new file mode 100644 index 0000000000..3bebe13dc1 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/context/tracing.py @@ -0,0 +1,28 @@ +from typing import Any, Dict, Optional + +from contextlib import contextmanager +from contextvars import ContextVar + +from pydantic import BaseModel + + +class TracingContext(BaseModel): + credentials: Optional[str] = None + parameters: Optional[Dict[str, Any]] = None + references: Optional[Dict[str, Any]] = None + link: Optional[Dict[str, Any]] = None + + +tracing_context = ContextVar("tracing_context", default=TracingContext()) + + +@contextmanager +def tracing_context_manager( + *, + 
context: Optional[TracingContext] = None, +): + token = tracing_context.set(context) + try: + yield + finally: + tracing_context.reset(token) diff --git a/services/completion-stateless-sdk/agenta/sdk/decorators/__init__.py b/services/completion-stateless-sdk/agenta/sdk/decorators/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/services/completion-stateless-sdk/agenta/sdk/decorators/routing.py b/services/completion-stateless-sdk/agenta/sdk/decorators/routing.py new file mode 100644 index 0000000000..3b6433e898 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/decorators/routing.py @@ -0,0 +1,913 @@ +from typing import Type, Any, Callable, Dict, Optional, Tuple, List +from inspect import signature, iscoroutinefunction, Signature, Parameter, _empty +from functools import wraps +from traceback import format_exception +from asyncio import sleep + +from tempfile import NamedTemporaryFile +from annotated_types import Ge, Le, Gt, Lt +from pydantic import BaseModel, HttpUrl, ValidationError + +from fastapi import Body, FastAPI, UploadFile, HTTPException, Request + +from agenta.sdk.middleware.auth import AuthMiddleware +from agenta.sdk.middleware.otel import OTelMiddleware +from agenta.sdk.middleware.config import ConfigMiddleware +from agenta.sdk.middleware.vault import VaultMiddleware +from agenta.sdk.middleware.cors import CORSMiddleware + +from agenta.sdk.context.routing import ( + routing_context_manager, + RoutingContext, +) +from agenta.sdk.context.tracing import ( + tracing_context_manager, + tracing_context, + TracingContext, +) +from agenta.sdk.router import router +from agenta.sdk.utils.exceptions import suppress, display_exception +from agenta.sdk.utils.logging import log +from agenta.sdk.types import ( + DictInput, + FloatParam, + InFile, + IntParam, + MultipleChoiceParam, + MultipleChoice, + GroupedMultipleChoiceParam, + TextParam, + MessagesInput, + FileInputURL, + BaseResponse, + BinaryParam, +) + +import agenta as ag 
+ + +app = FastAPI() +log.setLevel("DEBUG") + + +app.include_router(router, prefix="") + + +class PathValidator(BaseModel): + url: HttpUrl + + +class route: # pylint: disable=invalid-name + # This decorator is used to expose specific stages of a workflow (embedding, retrieval, summarization, etc.) + # as independent endpoints. It is designed for backward compatibility with existing code that uses + # the @entrypoint decorator, which has certain limitations. By using @route(), we can create new + # routes without altering the main workflow entrypoint. This helps in modularizing the services + # and provides flexibility in how we expose different functionalities as APIs. + def __init__( + self, + path: Optional[str] = "/", + config_schema: Optional[BaseModel] = None, + ): + self.config_schema: BaseModel = config_schema + path = "/" + path.strip("/").strip() + path = "" if path == "/" else path + PathValidator(url=f"http://example.com{path}") + + self.route_path = path + + self.e = None + + def __call__(self, f): + self.e = entrypoint( + f, + route_path=self.route_path, + config_schema=self.config_schema, + ) + + return f + + +class entrypoint: + """ + Decorator class to wrap a function for HTTP POST, terminal exposure and enable tracing. + + This decorator generates the following endpoints: + + Playground Endpoints + - /generate with @entrypoint, @route("/"), @route(path="") # LEGACY + - /playground/run with @entrypoint, @route("/"), @route(path="") + - /playground/run/{route} with @route({route}), @route(path={route}) + + Deployed Endpoints: + - /generate_deployed with @entrypoint, @route("/"), @route(path="") # LEGACY + - /run with @entrypoint, @route("/"), @route(path="") + - /run/{route} with @route({route}), @route(path={route}) + + The rationale is: + - There may be multiple endpoints, based on the different routes. + - It's better to make it explicit that an endpoint is for the playground. 
+ - Prefixing the routes with /run is more futureproof in case we add more endpoints. + + Example: + ```python + import agenta as ag + + @ag.entrypoint + async def chain_of_prompts_llm(prompt: str): + return ... + ``` + """ + + routes = list() + + _middleware = False + _run_path = "/run" + _test_path = "/test" + # LEGACY + _legacy_playground_run_path = "/playground/run" + _legacy_generate_path = "/generate" + _legacy_generate_deployed_path = "/generate_deployed" + + def __init__( + self, + func: Callable[..., Any], + route_path: str = "", + config_schema: Optional[BaseModel] = None, + ): + self.func = func + self.route_path = route_path + self.config_schema = config_schema + + signature_parameters = signature(func).parameters + ingestible_files = self.extract_ingestible_files() + config, default_parameters = self.parse_config() + + ### --- Middleware --- # + if not entrypoint._middleware: + entrypoint._middleware = True + + app.add_middleware(VaultMiddleware) + app.add_middleware(ConfigMiddleware) + app.add_middleware(AuthMiddleware) + app.add_middleware(OTelMiddleware) + app.add_middleware(CORSMiddleware) + ### ------------------ # + + ### --- Run --- # + @wraps(func) + async def run_wrapper(request: Request, *args, **kwargs) -> Any: + # LEGACY + # TODO: Removing this implies breaking changes in : + # - calls to /generate_deployed + kwargs = { + k: v + for k, v in kwargs.items() + if k not in ["config", "environment", "app"] + } + # LEGACY + + if request.state.config["parameters"] is None: + raise HTTPException( + status_code=400, + detail="Config not found based on provided references.", + ) + + kwargs, _ = self.split_kwargs(kwargs, default_parameters) + + # TODO: Why is this not used in the run_wrapper? 
+ # self.ingest_files(kwargs, ingestible_files) + + return await self.execute_wrapper(request, False, *args, **kwargs) + + self.update_run_wrapper_signature( + wrapper=run_wrapper, + ingestible_files=ingestible_files, + ) + + run_route = f"{entrypoint._run_path}{route_path}" + app.post(run_route, response_model=BaseResponse)(run_wrapper) + + # LEGACY + # TODO: Removing this implies breaking changes in : + # - calls to /generate_deployed must be replaced with calls to /run + if route_path == "": + run_route = entrypoint._legacy_generate_deployed_path + app.post(run_route, response_model=BaseResponse)(run_wrapper) + # LEGACY + ### ----------- # + + ### --- Test --- # + @wraps(func) + async def test_wrapper(request: Request, *args, **kwargs) -> Any: + kwargs, parameters = self.split_kwargs(kwargs, default_parameters) + + request.state.config["parameters"] = parameters + + # TODO: Why is this only used in the test_wrapper? + self.ingest_files(kwargs, ingestible_files) + + return await self.execute_wrapper(request, True, *args, **kwargs) + + self.update_test_wrapper_signature( + wrapper=test_wrapper, + ingestible_files=ingestible_files, + config_class=config, + config_dict=default_parameters, + ) + + test_route = f"{entrypoint._test_path}{route_path}" + app.post(test_route, response_model=BaseResponse)(test_wrapper) + + # LEGACY + # TODO: Removing this implies breaking changes in : + # - calls to /generate must be replaced with calls to /test + if route_path == "": + test_route = entrypoint._legacy_generate_path + app.post(test_route, response_model=BaseResponse)(test_wrapper) + # LEGACY + + # LEGACY + # TODO: Removing this implies no breaking changes + if route_path == "": + test_route = entrypoint._legacy_playground_run_path + app.post(test_route, response_model=BaseResponse)(test_wrapper) + # LEGACY + ### ------------ # + + ### --- OpenAPI --- # + test_route = f"{entrypoint._test_path}{route_path}" + entrypoint.routes.append( + { + "func": func.__name__, + 
"endpoint": test_route, + "params": ( + {**default_parameters, **signature_parameters} + if not config + else signature_parameters + ), + "config": config, + } + ) + + # LEGACY + if route_path == "": + test_route = entrypoint._legacy_generate_path + entrypoint.routes.append( + { + "func": func.__name__, + "endpoint": test_route, + "params": ( + {**default_parameters, **signature_parameters} + if not config + else signature_parameters + ), + "config": config, + } + ) + # LEGACY + + app.openapi_schema = None # Forces FastAPI to re-generate the schema + openapi_schema = app.openapi() + + for _route in entrypoint.routes: + self.override_schema( + openapi_schema=openapi_schema, + func_name=_route["func"], + endpoint=_route["endpoint"], + params=_route["params"], + ) + + if _route["config"] is not None: # new SDK version + self.override_config_in_schema( + openapi_schema=openapi_schema, + func_name=_route["func"], + endpoint=_route["endpoint"], + config=_route["config"], + ) + ### --------------- # + + def extract_ingestible_files(self) -> Dict[str, Parameter]: + """Extract parameters annotated as InFile from function signature.""" + + return { + name: param + for name, param in signature(self.func).parameters.items() + if param.annotation is InFile + } + + def parse_config(self) -> Dict[str, Any]: + config = None + default_parameters = ag.config.all() + + if self.config_schema: + try: + config = self.config_schema() if self.config_schema else None + default_parameters = config.dict() if config else default_parameters + except ValidationError as e: + raise ValueError( + f"Error initializing config_schema. 
Please ensure all required fields have default values: {str(e)}" + ) from e + except Exception as e: + raise ValueError( + f"Unexpected error initializing config_schema: {str(e)}" + ) from e + + return config, default_parameters + + def split_kwargs( + self, kwargs: Dict[str, Any], default_parameters: Dict[str, Any] + ) -> Tuple[Dict[str, Any], Dict[str, Any]]: + arguments = {k: v for k, v in kwargs.items() if k not in default_parameters} + parameters = {k: v for k, v in kwargs.items() if k in default_parameters} + + return arguments, parameters + + def ingest_file( + self, + upfile: UploadFile, + ): + temp_file = NamedTemporaryFile(delete=False) + temp_file.write(upfile.file.read()) + temp_file.close() + + return InFile(file_name=upfile.filename, file_path=temp_file.name) + + def ingest_files( + self, + func_params: Dict[str, Any], + ingestible_files: Dict[str, Parameter], + ) -> None: + """Ingest files specified in function parameters.""" + + for name in ingestible_files: + if name in func_params and func_params[name] is not None: + func_params[name] = self.ingest_file(func_params[name]) + + async def execute_wrapper( + self, + request: Request, + inline: bool, + *args, + **kwargs, + ): + if not request: + raise HTTPException(status_code=500, detail="Missing 'request'.") + + state = request.state + credentials = state.auth.get("credentials") + parameters = state.config.get("parameters") + references = state.config.get("references") + secrets = state.vault.get("secrets") + + with routing_context_manager( + context=RoutingContext( + parameters=parameters, + secrets=secrets, + ) + ): + with tracing_context_manager( + context=TracingContext( + credentials=credentials, + parameters=parameters, + references=references, + ) + ): + result = await self.execute_function(inline, *args, **kwargs) + + return result + + async def execute_function( + self, + inline: bool, + *args, + **kwargs, + ): + try: + result = ( + await self.func(*args, **kwargs) + if 
iscoroutinefunction(self.func) + else self.func(*args, **kwargs) + ) + + return await self.handle_success(result, inline) + + except Exception as error: # pylint: disable=broad-except + self.handle_failure(error) + + async def handle_success( + self, + result: Any, + inline: bool, + ): + data = None + tree = None + + with suppress(): + data = self.patch_result(result) + + if inline: + tree = await self.fetch_inline_trace(inline) + + try: + return BaseResponse(data=data, tree=tree) + except: + return BaseResponse(data=data) + + def handle_failure( + self, + error: Exception, + ): + display_exception("Application Exception") + + status_code = 500 + message = str(error) + stacktrace = format_exception(error, value=error, tb=error.__traceback__) # type: ignore + detail = {"message": message, "stacktrace": stacktrace} + + raise HTTPException(status_code=status_code, detail=detail) + + def patch_result( + self, + result: Any, + ): + """ + Patch the result to only include the message if the result is a FuncResponse-style dictionary with message, cost, and usage keys. + + Example: + ```python + result = { + "message": "Hello, world!", + "cost": 0.5, + "usage": { + "prompt_tokens": 10, + "completion_tokens": 20, + "total_tokens": 30 + } + } + result = patch_result(result) + print(result) + # Output: "Hello, world!" + ``` + """ + data = ( + result["message"] + if isinstance(result, dict) + and all(key in result for key in ["message", "cost", "usage"]) + else result + ) + + if data is None: + data = ( + "Function executed successfully, but did return None. 
\n Are you sure you did not forget to return a value?", + ) + + if not isinstance(result, dict): + data = str(data) + + return data + + async def fetch_inline_trace( + self, + inline, + ): + WAIT_FOR_SPANS = True + TIMEOUT = 1 + TIMESTEP = 0.1 + FINALSTEP = 0.001 + NOFSTEPS = TIMEOUT / TIMESTEP + + trace = None + + context = tracing_context.get() + + link = context.link + + trace_id = link.get("tree_id") if link else None + + if trace_id is not None: + if inline: + if WAIT_FOR_SPANS: + remaining_steps = NOFSTEPS + + while ( + not ag.tracing.is_inline_trace_ready(trace_id) + and remaining_steps > 0 + ): + await sleep(TIMESTEP) + + remaining_steps -= 1 + + await sleep(FINALSTEP) + + trace = ag.tracing.get_inline_trace(trace_id) + else: + trace = {"trace_id": trace_id} + + return trace + + # --- OpenAPI --- # + + def add_request_to_signature( + self, + wrapper: Callable[..., Any], + ): + original_sig = signature(wrapper) + parameters = [ + Parameter( + "request", + kind=Parameter.POSITIONAL_OR_KEYWORD, + annotation=Request, + ), + *original_sig.parameters.values(), + ] + new_sig = Signature( + parameters, + return_annotation=original_sig.return_annotation, + ) + wrapper.__signature__ = new_sig + + def update_wrapper_signature( + self, wrapper: Callable[..., Any], updated_params: List + ): + """ + Updates the signature of a wrapper function with a new list of parameters. + + Args: + wrapper (callable): A callable object, such as a function or a method, that requires a signature update. + updated_params (List[Parameter]): A list of `Parameter` objects representing the updated parameters + for the wrapper function. 
+ """ + + wrapper_signature = signature(wrapper) + wrapper_signature = wrapper_signature.replace(parameters=updated_params) + wrapper.__signature__ = wrapper_signature # type: ignore + + def update_test_wrapper_signature( + self, + wrapper: Callable[..., Any], + config_class: Type[BaseModel], # TODO: change to our type + config_dict: Dict[str, Any], + ingestible_files: Dict[str, Parameter], + ) -> None: + """Update the function signature to include new parameters.""" + + updated_params: List[Parameter] = [] + if config_class: + self.add_config_params_to_parser(updated_params, config_class) + else: + self.deprecated_add_config_params_to_parser(updated_params, config_dict) + self.add_func_params_to_parser(updated_params, ingestible_files) + self.update_wrapper_signature(wrapper, updated_params) + self.add_request_to_signature(wrapper) + + def update_run_wrapper_signature( + self, + wrapper: Callable[..., Any], + ingestible_files: Dict[str, Parameter], + ) -> None: + """Update the function signature to include new parameters.""" + + updated_params: List[Parameter] = [] + self.add_func_params_to_parser(updated_params, ingestible_files) + for param in [ + "config", + "environment", + ]: # we add the config and environment parameters + updated_params.append( + Parameter( + name=param, + kind=Parameter.KEYWORD_ONLY, + default=Body(None), + annotation=str, + ) + ) + self.update_wrapper_signature(wrapper, updated_params) + self.add_request_to_signature(wrapper) + + def add_config_params_to_parser( + self, updated_params: list, config_class: Type[BaseModel] + ) -> None: + """Add configuration parameters to function signature.""" + for name, field in config_class.__fields__.items(): + assert field.default is not None, f"Field {name} has no default value" + updated_params.append( + Parameter( + name=name, + kind=Parameter.KEYWORD_ONLY, + annotation=field.annotation.__name__, + default=Body(field.default), + ) + ) + + def deprecated_add_config_params_to_parser( + self, 
updated_params: list, config_dict: Dict[str, Any] + ) -> None: + """Add configuration parameters to function signature.""" + for name, param in config_dict.items(): + assert ( + len(param.__class__.__bases__) == 1 + ), f"Inherited standard type of {param.__class__} needs to be one." + updated_params.append( + Parameter( + name=name, + kind=Parameter.KEYWORD_ONLY, + default=Body(param), + annotation=param.__class__.__bases__[ + 0 + ], # determines and get the base (parent/inheritance) type of the sdk-type at run-time. \ + # E.g __class__ is ag.MessagesInput() and accessing it parent type will return (,), \ + # thus, why we are accessing the first item. + ) + ) + + def add_func_params_to_parser( + self, + updated_params: list, + ingestible_files: Dict[str, Parameter], + ) -> None: + """Add function parameters to function signature.""" + for name, param in signature(self.func).parameters.items(): + if name in ingestible_files: + updated_params.append( + Parameter(name, param.kind, annotation=UploadFile) + ) + else: + assert ( + len(param.default.__class__.__bases__) == 1 + ), f"Inherited standard type of {param.default.__class__} needs to be one." + updated_params.append( + Parameter( + name, + Parameter.KEYWORD_ONLY, + default=Body(..., embed=True), + annotation=param.default.__class__.__bases__[ + 0 + ], # determines and get the base (parent/inheritance) type of the sdk-type at run-time. \ + # E.g __class__ is ag.MessagesInput() and accessing it parent type will return (,), \ + # thus, why we are accessing the first item. 
+ ) + ) + + def override_config_in_schema( + self, + openapi_schema: dict, + func_name: str, + endpoint: str, + config: Type[BaseModel], + ): + endpoint = endpoint[1:].replace("/", "_") + schema_to_override = openapi_schema["components"]["schemas"][ + f"Body_{func_name}_{endpoint}_post" + ]["properties"] + # New logic + for param_name, param_val in config.__fields__.items(): + if param_val.annotation is str: + if any( + isinstance(constraint, MultipleChoice) + for constraint in param_val.metadata + ): + choices = next( + constraint.choices + for constraint in param_val.metadata + if isinstance(constraint, MultipleChoice) + ) + if isinstance(choices, dict): + schema_to_override[param_name]["x-parameter"] = "grouped_choice" + schema_to_override[param_name]["choices"] = choices + elif isinstance(choices, list): + schema_to_override[param_name]["x-parameter"] = "choice" + schema_to_override[param_name]["enum"] = choices + else: + schema_to_override[param_name]["x-parameter"] = "text" + if param_val.annotation is bool: + schema_to_override[param_name]["x-parameter"] = "bool" + if param_val.annotation in (int, float): + schema_to_override[param_name]["x-parameter"] = ( + "int" if param_val.annotation is int else "float" + ) + # Check for greater than or equal to constraint + if any(isinstance(constraint, Ge) for constraint in param_val.metadata): + min_value = next( + constraint.ge + for constraint in param_val.metadata + if isinstance(constraint, Ge) + ) + schema_to_override[param_name]["minimum"] = min_value + # Check for greater than constraint + elif any( + isinstance(constraint, Gt) for constraint in param_val.metadata + ): + min_value = next( + constraint.gt + for constraint in param_val.metadata + if isinstance(constraint, Gt) + ) + schema_to_override[param_name]["exclusiveMinimum"] = min_value + # Check for less than or equal to constraint + if any(isinstance(constraint, Le) for constraint in param_val.metadata): + max_value = next( + constraint.le + for 
constraint in param_val.metadata + if isinstance(constraint, Le) + ) + schema_to_override[param_name]["maximum"] = max_value + # Check for less than constraint + elif any( + isinstance(constraint, Lt) for constraint in param_val.metadata + ): + max_value = next( + constraint.lt + for constraint in param_val.metadata + if isinstance(constraint, Lt) + ) + schema_to_override[param_name]["exclusiveMaximum"] = max_value + + def override_schema( + self, openapi_schema: dict, func_name: str, endpoint: str, params: dict + ): + """ + Overrides the default openai schema generated by fastapi with additional information about: + - The choices available for each MultipleChoiceParam instance + - The min and max values for each FloatParam instance + - The min and max values for each IntParam instance + - The default value for DictInput instance + - The default value for MessagesParam instance + - The default value for FileInputURL instance + - The default value for BinaryParam instance + - ... [PLEASE ADD AT EACH CHANGE] + + Args: + openapi_schema (dict): The openapi schema generated by fastapi + func (str): The name of the function to override + endpoint (str): The name of the endpoint to override + params (dict(param_name, param_val)): The dictionary of the parameters for the function + """ + + def find_in_schema( + schema_type_properties: dict, schema: dict, param_name: str, xparam: str + ): + """Finds a parameter in the schema based on its name and x-parameter value""" + for _, value in schema.items(): + value_title_lower = str(value.get("title")).lower() + value_title = ( + "_".join(value_title_lower.split()) + if len(value_title_lower.split()) >= 2 + else value_title_lower + ) + + if ( + isinstance(value, dict) + and schema_type_properties.get("x-parameter") == xparam + and value_title == param_name + ): + # this will update the default type schema with the properties gotten + # from the schema type (param_val) __schema_properties__ classmethod + for type_key, type_value in 
schema_type_properties.items(): + # BEFORE: + # value = {'temperature': {'title': 'Temperature'}} + value[type_key] = type_value + # AFTER: + # value = {'temperature': { "type": "number", "title": "Temperature", "x-parameter": "float" }} + return value + + def get_type_from_param(param_val): + param_type = "string" + annotation = param_val.annotation + + if annotation == int: + param_type = "integer" + elif annotation == float: + param_type = "number" + elif annotation == dict: + param_type = "object" + elif annotation == bool: + param_type = "boolean" + elif annotation == list: + param_type = "list" + elif annotation == str: + param_type = "string" + else: + print("ERROR, unhandled annotation:", annotation) + + return param_type + + # Goes from '/some/path' to 'some_path' + endpoint = endpoint[1:].replace("/", "_") + + schema_to_override = openapi_schema["components"]["schemas"][ + f"Body_{func_name}_{endpoint}_post" + ]["properties"] + + for param_name, param_val in params.items(): + if isinstance(param_val, GroupedMultipleChoiceParam): + subschema = find_in_schema( + param_val.__schema_type_properties__(), + schema_to_override, + param_name, + "grouped_choice", + ) + assert ( + subschema + ), f"GroupedMultipleChoiceParam '{param_name}' is in the parameters but could not be found in the openapi.json" + subschema["choices"] = param_val.choices # type: ignore + subschema["default"] = param_val.default # type: ignore + + elif isinstance(param_val, MultipleChoiceParam): + subschema = find_in_schema( + param_val.__schema_type_properties__(), + schema_to_override, + param_name, + "choice", + ) + default = str(param_val) + param_choices = param_val.choices # type: ignore + choices = ( + [default] + param_choices + if param_val not in param_choices + else param_choices + ) + subschema["enum"] = choices + subschema["default"] = ( + default if default in param_choices else choices[0] + ) + + elif isinstance(param_val, FloatParam): + subschema = find_in_schema( + 
param_val.__schema_type_properties__(), + schema_to_override, + param_name, + "float", + ) + subschema["minimum"] = param_val.minval # type: ignore + subschema["maximum"] = param_val.maxval # type: ignore + subschema["default"] = param_val + + elif isinstance(param_val, IntParam): + subschema = find_in_schema( + param_val.__schema_type_properties__(), + schema_to_override, + param_name, + "int", + ) + subschema["minimum"] = param_val.minval # type: ignore + subschema["maximum"] = param_val.maxval # type: ignore + subschema["default"] = param_val + + elif isinstance(param_val, Parameter) and param_val.annotation is DictInput: + subschema = find_in_schema( + param_val.annotation.__schema_type_properties__(), + schema_to_override, + param_name, + "dict", + ) + subschema["default"] = param_val.default["default_keys"] + + elif isinstance(param_val, TextParam): + subschema = find_in_schema( + param_val.__schema_type_properties__(), + schema_to_override, + param_name, + "text", + ) + subschema["default"] = param_val + + elif ( + isinstance(param_val, Parameter) + and param_val.annotation is MessagesInput + ): + subschema = find_in_schema( + param_val.annotation.__schema_type_properties__(), + schema_to_override, + param_name, + "messages", + ) + subschema["default"] = param_val.default + + elif ( + isinstance(param_val, Parameter) + and param_val.annotation is FileInputURL + ): + subschema = find_in_schema( + param_val.annotation.__schema_type_properties__(), + schema_to_override, + param_name, + "file_url", + ) + subschema["default"] = "https://example.com" + + elif isinstance(param_val, BinaryParam): + subschema = find_in_schema( + param_val.__schema_type_properties__(), + schema_to_override, + param_name, + "bool", + ) + subschema["default"] = param_val.default # type: ignore + else: + subschema = { + "title": str(param_name).capitalize(), + "type": get_type_from_param(param_val), + } + if param_val.default != _empty: + subschema["default"] = param_val.default # type: 
ignore + schema_to_override[param_name] = subschema diff --git a/services/completion-stateless-sdk/agenta/sdk/decorators/tracing.py b/services/completion-stateless-sdk/agenta/sdk/decorators/tracing.py new file mode 100644 index 0000000000..f368509fc6 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/decorators/tracing.py @@ -0,0 +1,296 @@ +from typing import Callable, Optional, Any, Dict, List, Union + +from functools import wraps +from itertools import chain +from inspect import iscoroutinefunction, getfullargspec + +from opentelemetry import baggage as baggage +from opentelemetry.context import attach, detach + +from agenta.sdk.utils.exceptions import suppress +from agenta.sdk.context.tracing import tracing_context +from agenta.sdk.tracing.conventions import parse_span_kind + +import agenta as ag + + +class instrument: # pylint: disable=invalid-name + DEFAULT_KEY = "__default__" + + def __init__( + self, + type: str = "task", # pylint: disable=redefined-builtin + config: Optional[Dict[str, Any]] = None, + ignore_inputs: Optional[bool] = None, + ignore_outputs: Optional[bool] = None, + redact: Optional[Callable[..., Any]] = None, + redact_on_error: Optional[bool] = True, + max_depth: Optional[int] = 2, + # DEPRECATING + kind: str = "task", + spankind: Optional[str] = "TASK", + ) -> None: + self.type = spankind or kind or type + self.kind = None + self.config = config + self.ignore_inputs = ignore_inputs + self.ignore_outputs = ignore_outputs + self.redact = redact + self.redact_on_error = redact_on_error + self.max_depth = max_depth + + def __call__(self, func: Callable[..., Any]): + is_coroutine_function = iscoroutinefunction(func) + + @wraps(func) + async def awrapper(*args, **kwargs): + async def aauto_instrumented(*args, **kwargs): + self._parse_type_and_kind() + + token = self._attach_baggage() + + with ag.tracer.start_as_current_span(func.__name__, kind=self.kind): + self._pre_instrument(func, *args, **kwargs) + + result = await func(*args, 
**kwargs) + + self._post_instrument(result) + + return result + + self._detach_baggage(token) + + return await aauto_instrumented(*args, **kwargs) + + @wraps(func) + def wrapper(*args, **kwargs): + def auto_instrumented(*args, **kwargs): + self._parse_type_and_kind() + + token = self._attach_baggage() + + with ag.tracer.start_as_current_span(func.__name__, kind=self.kind): + self._pre_instrument(func, *args, **kwargs) + + result = func(*args, **kwargs) + + self._post_instrument(result) + + return result + + self._detach_baggage(token) + + return auto_instrumented(*args, **kwargs) + + return awrapper if is_coroutine_function else wrapper + + def _parse_type_and_kind(self): + if not ag.tracing.get_current_span().is_recording(): + self.type = "workflow" + + self.kind = parse_span_kind(self.type) + + def _attach_baggage(self): + context = tracing_context.get() + + references = context.references + + token = None + if references: + for k, v in references.items(): + token = attach(baggage.set_baggage(f"ag.refs.{k}", v)) + + return token + + def _detach_baggage( + self, + token, + ): + if token: + detach(token) + + def _pre_instrument( + self, + func, + *args, + **kwargs, + ): + span = ag.tracing.get_current_span() + + context = tracing_context.get() + + with suppress(): + trace_id = span.context.trace_id + + ag.tracing.credentials[trace_id] = context.credentials + + span.set_attributes( + attributes={"node": self.type}, + namespace="type", + ) + + if span.parent is None: + span.set_attributes( + attributes={"configuration": context.parameters or {}}, + namespace="meta", + ) + + _inputs = self._redact( + name=span.name, + field="inputs", + io=self._parse(func, *args, **kwargs), + ignore=self.ignore_inputs, + ) + + span.set_attributes( + attributes={"inputs": _inputs}, + namespace="data", + max_depth=self.max_depth, + ) + + def _post_instrument( + self, + result, + ): + span = ag.tracing.get_current_span() + with suppress(): + cost = None + usage = {} + + if 
isinstance(result, dict): + cost = result.get("cost", None) + usage = result.get("usage", {}) + + if isinstance(usage, (int, float)): + usage = {"total_tokens": usage} + + span.set_attributes( + attributes={"total": cost}, + namespace="metrics.unit.costs", + ) + span.set_attributes( + attributes=( + { + "prompt": usage.get("prompt_tokens", None), + "completion": usage.get("completion_tokens", None), + "total": usage.get("total_tokens", None), + } + ), + namespace="metrics.unit.tokens", + ) + + _outputs = self._redact( + name=span.name, + field="outputs", + io=self._patch(result), + ignore=self.ignore_outputs, + ) + + span.set_attributes( + attributes={"outputs": _outputs}, + namespace="data", + max_depth=self.max_depth, + ) + + span.set_status("OK") + + with suppress(): + if hasattr(span, "parent") and span.parent is None: + context = tracing_context.get() + context.link = { + "tree_id": span.get_span_context().trace_id, + "node_id": span.get_span_context().span_id, + } + tracing_context.set(context) + + def _parse( + self, + func, + *args, + **kwargs, + ) -> Dict[str, Any]: + inputs = { + key: value + for key, value in chain( + zip(getfullargspec(func).args, args), + kwargs.items(), + ) + } + + return inputs + + def _redact( + self, + *, + name: str, + field: str, + io: Dict[str, Any], + ignore: Union[List[str], bool] = False, + ) -> Dict[str, Any]: + """ + Redact user-defined sensitive information + from inputs and outputs as defined by the ignore list or boolean flag. 
+ + Example: + - ignore = ["password"] -> {"username": "admin", "password": "********"} + -> {"username": "admin"} + - ignore = True -> {"username": "admin", "password": "********"} + -> {} + - ignore = False -> {"username": "admin", "password": "********"} + -> {"username": "admin", "password": "********"} + """ + io = { + key: value + for key, value in io.items() + if key + not in ( + ignore + if isinstance(ignore, list) + else io.keys() + if ignore is True + else [] + ) + } + + if self.redact is not None: + try: + io = self.redact(name, field, io) + except: # pylint: disable=bare-except + if self.redact_on_error: + io = {} + + if ag.tracing.redact is not None: + try: + io = ag.tracing.redact(name, field, io) + except: # pylint: disable=bare-except + if ag.tracing.redact_on_error: + io = {} + + return io + + def _patch( + self, + result: Any, + ) -> Dict[str, Any]: + """ + Patch the result to ensure that it is a dictionary, with a default key when necessary. + + Example: + - result = "Hello, World!" 
+ -> {"__default__": "Hello, World!"} + - result = {"message": "Hello, World!", "cost": 0.0, "usage": {}} + -> {"__default__": "Hello, World!"} + - result = {"message": "Hello, World!"} + -> {"message": "Hello, World!"} + """ + outputs = ( + {instrument.DEFAULT_KEY: result} + if not isinstance(result, dict) + else ( + {instrument.DEFAULT_KEY: result["message"]} + if all(key in result for key in ["message", "cost", "usage"]) + else result + ) + ) + + return outputs diff --git a/services/completion-stateless-sdk/agenta/sdk/litellm/__init__.py b/services/completion-stateless-sdk/agenta/sdk/litellm/__init__.py new file mode 100644 index 0000000000..e9ce42ea24 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/litellm/__init__.py @@ -0,0 +1 @@ +from .litellm import litellm_handler diff --git a/services/completion-stateless-sdk/agenta/sdk/litellm/litellm.py b/services/completion-stateless-sdk/agenta/sdk/litellm/litellm.py new file mode 100644 index 0000000000..d6e2e57c14 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/litellm/litellm.py @@ -0,0 +1,314 @@ +from typing import Dict +from opentelemetry.trace import SpanKind + +import agenta as ag + +from agenta.sdk.tracing.spans import CustomSpan +from agenta.sdk.utils.exceptions import suppress # TODO: use it ! +from agenta.sdk.utils.logging import log + + +def litellm_handler(): + try: + from litellm.integrations.custom_logger import ( # pylint: disable=import-outside-toplevel + CustomLogger as LitellmCustomLogger, + ) + except ImportError as exc: + raise ImportError( + "The litellm SDK is not installed. Please install it using `pip install litellm`." + ) from exc + except Exception as exc: + raise Exception( # pylint: disable=broad-exception-raised + f"Unexpected error occurred when importing litellm: {exc}" + ) from exc + + class LitellmHandler(LitellmCustomLogger): + """ + This handler is responsible for instrumenting certain events, + when using litellm to call LLMs. 
+ + Args: + LitellmCustomLogger (object): custom logger that allows us + to override the events to capture. + """ + + def __init__(self): + super().__init__() + + self.span: Dict[str, CustomSpan] = dict() + + def log_pre_api_call( + self, + model, + messages, + kwargs, + ): + litellm_call_id = kwargs.get("litellm_call_id") + + if not litellm_call_id: + log.warning("Agenta SDK - litellm tracing failed") + return + + type = ( # pylint: disable=redefined-builtin + "chat" + if kwargs.get("call_type") in ["completion", "acompletion"] + else "embedding" + ) + + kind = SpanKind.CLIENT + + self.span[litellm_call_id] = CustomSpan( + ag.tracer.start_span(name=f"litellm_{kind.name.lower()}", kind=kind) + ) + + span = self.span[litellm_call_id] + + if not span: + log.warning("Agenta SDK - litellm tracing failed") + return + + if not span.is_recording(): + log.error("Agenta SDK - litellm span not recording.") + return + + span.set_attributes( + attributes={"node": type}, + namespace="type", + ) + + span.set_attributes( + attributes={"inputs": {"prompt": kwargs["messages"]}}, + namespace="data", + ) + + span.set_attributes( + attributes={ + "configuration": { + "model": kwargs.get("model"), + **kwargs.get("optional_params"), + } + }, + namespace="meta", + ) + + def log_stream_event( + self, + kwargs, + response_obj, + start_time, + end_time, + ): + litellm_call_id = kwargs.get("litellm_call_id") + + if not litellm_call_id: + log.warning("Agenta SDK - litellm tracing failed") + return + + span = self.span[litellm_call_id] + + if not span: + log.warning("Agenta SDK - litellm tracing failed") + return + + if not span.is_recording(): + return + + def log_success_event( + self, + kwargs, + response_obj, + start_time, + end_time, + ): + if kwargs.get("stream"): + return + + litellm_call_id = kwargs.get("litellm_call_id") + + if not litellm_call_id: + log.warning("Agenta SDK - litellm tracing failed") + return + + span = self.span[litellm_call_id] + + if not span: + log.warning("Agenta 
SDK - litellm tracing failed") + return + + if not span.is_recording(): + return + + try: + result = [] + for choice in response_obj.choices: + message = choice.message.__dict__ + result.append(message) + + outputs = {"completion": result} + span.set_attributes( + attributes={"outputs": outputs}, + namespace="data", + ) + + except Exception as e: + pass + + span.set_attributes( + attributes={"total": kwargs.get("response_cost")}, + namespace="metrics.unit.costs", + ) + + span.set_attributes( + attributes=( + { + "prompt": response_obj.usage.prompt_tokens, + "completion": response_obj.usage.completion_tokens, + "total": response_obj.usage.total_tokens, + } + ), + namespace="metrics.unit.tokens", + ) + + span.set_status(status="OK") + + span.end() + + def log_failure_event( + self, + kwargs, + response_obj, + start_time, + end_time, + ): + litellm_call_id = kwargs.get("litellm_call_id") + + if not litellm_call_id: + log.warning("Agenta SDK - litellm tracing failed") + return + + span = self.span[litellm_call_id] + + if not span: + log.warning("Agenta SDK - litellm tracing failed") + return + + if not span.is_recording(): + return + + span.record_exception(kwargs["exception"]) + + span.set_status(status="ERROR") + + span.end() + + async def async_log_stream_event( + self, + kwargs, + response_obj, + start_time, + end_time, + ): + if kwargs.get("stream"): + return + + litellm_call_id = kwargs.get("litellm_call_id") + + if not litellm_call_id: + log.warning("Agenta SDK - litellm tracing failed") + return + + span = self.span[litellm_call_id] + + if not span: + log.warning("Agenta SDK - litellm tracing failed") + return + + if not span.is_recording(): + return + + async def async_log_success_event( + self, + kwargs, + response_obj, + start_time, + end_time, + ): + litellm_call_id = kwargs.get("litellm_call_id") + + if not litellm_call_id: + log.warning("Agenta SDK - litellm tracing failed") + return + + span = self.span[litellm_call_id] + + if not span: + 
log.warning("Agenta SDK - litellm tracing failed") + return + + if not span.is_recording(): + return + + try: + result = [] + for choice in response_obj.choices: + message = choice.message.__dict__ + result.append(message) + + outputs = {"completion": result} + span.set_attributes( + attributes={"outputs": outputs}, + namespace="data", + ) + + except Exception as e: + pass + + span.set_attributes( + attributes={"total": kwargs.get("response_cost")}, + namespace="metrics.unit.costs", + ) + + span.set_attributes( + attributes=( + { + "prompt": response_obj.usage.prompt_tokens, + "completion": response_obj.usage.completion_tokens, + "total": response_obj.usage.total_tokens, + } + ), + namespace="metrics.unit.tokens", + ) + + span.set_status(status="OK") + + span.end() + + async def async_log_failure_event( + self, + kwargs, + response_obj, + start_time, + end_time, + ): + litellm_call_id = kwargs.get("litellm_call_id") + + if not litellm_call_id: + log.warning("Agenta SDK - litellm tracing failed") + return + + span = self.span[litellm_call_id] + + if not span: + log.warning("Agenta SDK - litellm tracing failed") + return + + if not span.is_recording(): + return + + span.record_exception(kwargs["exception"]) + + span.set_status(status="ERROR") + + span.end() + + return LitellmHandler() diff --git a/services/completion-stateless-sdk/agenta/sdk/managers/__init__.py b/services/completion-stateless-sdk/agenta/sdk/managers/__init__.py new file mode 100644 index 0000000000..53f8e20935 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/managers/__init__.py @@ -0,0 +1,6 @@ +from agenta.sdk.managers.config import ConfigManager +from agenta.sdk.managers.variant import VariantManager +from agenta.sdk.managers.deployment import DeploymentManager + + +__all__ = ["ConfigManager", "VariantManager", "DeploymentManager"] diff --git a/services/completion-stateless-sdk/agenta/sdk/managers/config.py b/services/completion-stateless-sdk/agenta/sdk/managers/config.py new file 
mode 100644 index 0000000000..d3ec7b97cb --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/managers/config.py @@ -0,0 +1,208 @@ +import json +import logging +from pathlib import Path +from typing import Optional, Type, TypeVar, Dict, Any, Union + +import yaml +from pydantic import BaseModel + +from agenta.sdk.managers.shared import SharedManager +from agenta.sdk.context.routing import routing_context + +T = TypeVar("T", bound=BaseModel) + +logger = logging.getLogger(__name__) + +AVAILABLE_ENVIRONMENTS = ["development", "production", "staging"] + + +class ConfigManager: + @staticmethod + def get_from_route( + schema: Optional[Type[T]] = None, + ) -> Optional[Union[Dict[str, Any], T]]: + """ + Retrieves the configuration from the route context and returns a config object. + + This method checks the route context for configuration information and returns + an instance of the specified schema based on the available context data. + + Args: + schema (Type[T]): A Pydantic model class that defines the structure of the configuration. + + Returns: + T: An instance of the specified schema populated with the configuration data. + + Raises: + ValueError: If conflicting configuration sources are provided or if no valid + configuration source is found in the context. + + Note: + The method prioritizes the inputs in the following way: + 1. 'config' (i.e. when called explicitly from the playground) + 2. 'environment' + 3. 'variant' + Only one of these should be provided. 
+ """ + + context = routing_context.get() + + parameters = context.parameters + + if not parameters: + return None + + if not schema: + return parameters + + return schema(**parameters) + + @staticmethod + def get_from_registry( + schema: Optional[Type[T]] = None, + # + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + variant_id: Optional[str] = None, + variant_slug: Optional[str] = None, + variant_version: Optional[int] = None, + environment_id: Optional[str] = None, + environment_slug: Optional[str] = None, + environment_version: Optional[int] = None, + ) -> Union[Dict[str, Any], T]: + """ + Pulls the parameters for the app variant from the server and returns a config object. + + This method retrieves the configuration from the backend server based on the provided + environment or variant. It then validates and returns the configuration as an instance + of the specified schema. + + Args: + app_slug (str): The unique identifier for the application whose configuration is to be fetched. + variant_slug (Optional[str]): The variant name to fetch the configuration for. Defaults to None. + variant_version (Optional[int]): The version number of the variant to fetch. Defaults to None. + environment_slug (Optional[str]): The environment name to fetch the configuration for. + Must be one of "development", "production", or "staging". Defaults to None. + + Raises: + Exception: For any other errors during the process (e.g., API communication issues). 
+ """ + config = SharedManager.fetch( + app_id=app_id, + app_slug=app_slug, + variant_id=variant_id, + variant_slug=variant_slug, + variant_version=variant_version, + environment_id=environment_id, + environment_slug=environment_slug, + environment_version=environment_version, + ) + + if schema: + return schema(**config.params) + + return config.params + + @staticmethod + async def aget_from_registry( + schema: Optional[Type[T]] = None, + # + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + variant_id: Optional[str] = None, + variant_slug: Optional[str] = None, + variant_version: Optional[int] = None, + environment_id: Optional[str] = None, + environment_slug: Optional[str] = None, + environment_version: Optional[int] = None, + ) -> Union[Dict[str, Any], T]: + """ + Pulls the parameters for the app variant from the server and returns a config object. + + This method retrieves the configuration from the backend server based on the provided + environment or variant. It then validates and returns the configuration as an instance + of the specified schema. + + Args: + app_slug (str): The unique identifier for the application whose configuration is to be fetched. + variant_slug (Optional[str]): The variant name to fetch the configuration for. Defaults to None. + variant_version (Optional[int]): The version number of the variant to fetch. Defaults to None. + environment_slug (Optional[str]): The environment name to fetch the configuration for. + Must be one of "development", "production", or "staging". Defaults to None. + + Raises: + Exception: For any other errors during the process (e.g., API communication issues). 
+ """ + config = await SharedManager.afetch( + app_id=app_id, + app_slug=app_slug, + variant_id=variant_id, + variant_slug=variant_slug, + variant_version=variant_version, + environment_id=environment_id, + environment_slug=environment_slug, + environment_version=environment_version, + ) + + if schema: + return schema(**config.params) + + return config.params + + @staticmethod + def get_from_yaml( + filename: str, + schema: Optional[Type[T]] = None, + ) -> T: + """ + Loads configuration from a YAML file and returns a config object. + + Args: + filename (str): The name of the YAML file to load. + schema (Type[T]): A Pydantic model class that defines the structure of the configuration. + + Returns: + T: An instance of the specified schema populated with the configuration data. + + Raises: + FileNotFoundError: If the specified file doesn't exist. + ValidationError: If the loaded configuration data doesn't match the schema. + """ + file_path = Path(filename) + + with open(file_path, "r", encoding="utf-8") as file: + parameters = yaml.safe_load(file) + + if schema: + return schema(**parameters) + + return parameters + + @staticmethod + def get_from_json( + filename: str, + schema: Optional[Type[T]] = None, + ) -> T: + """ + Loads configuration from a JSON file and returns a config object. + + Args: + filename (str): The name of the JSON file to load. + schema (Type[T]): A Pydantic model class that defines the structure of the configuration. + + Returns: + T: An instance of the specified schema populated with the configuration data. + + Raises: + FileNotFoundError: If the specified file doesn't exist. + ValidationError: If the loaded configuration data doesn't match the schema. 
+ """ + file_path = Path(filename) + + with open(file_path, "r", encoding="utf-8") as file: + parameters = json.load(file) + + if schema: + return schema(**parameters) + + return parameters diff --git a/services/completion-stateless-sdk/agenta/sdk/managers/deployment.py b/services/completion-stateless-sdk/agenta/sdk/managers/deployment.py new file mode 100644 index 0000000000..458170c853 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/managers/deployment.py @@ -0,0 +1,45 @@ +from typing import Optional + +from agenta.sdk.managers.shared import SharedManager + + +class DeploymentManager: + @classmethod + def deploy( + cls, + *, + variant_slug: str, + environment_slug: str, + # + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + variant_version: Optional[int] = None, + ): + deployment = SharedManager.deploy( + app_id=app_id, + app_slug=app_slug, + variant_slug=variant_slug, + variant_version=variant_version, + environment_slug=environment_slug, + ) + return deployment + + @classmethod + async def adeploy( + cls, + *, + variant_slug: str, + environment_slug: str, + # + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + variant_version: Optional[int] = None, + ): + deployment = await SharedManager.adeploy( + app_id=app_id, + app_slug=app_slug, + variant_slug=variant_slug, + variant_version=variant_version, + environment_slug=environment_slug, + ) + return deployment diff --git a/services/completion-stateless-sdk/agenta/sdk/managers/shared.py b/services/completion-stateless-sdk/agenta/sdk/managers/shared.py new file mode 100644 index 0000000000..40e2fbd7b2 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/managers/shared.py @@ -0,0 +1,639 @@ +import logging +from typing import Optional, Dict, Any + +from agenta.sdk.utils.exceptions import handle_exceptions + +from agenta.sdk.types import ( + ConfigurationResponse, + DeploymentResponse, +) +from agenta.client.backend.types.config_dto import ConfigDto as 
ConfigRequest +from agenta.client.backend.types.config_response_model import ConfigResponseModel +from agenta.client.backend.types.reference_request_model import ReferenceRequestModel + +import agenta as ag + + +logger = logging.getLogger(__name__) + + +class SharedManager: + """ + SharedManager is a utility class that serves as an interface for managing + application configurations, variants, and deployments through the Agenta API. + It provides both synchronous and asynchronous methods, allowing flexibility + depending on the context of use (e.g., blocking or non-blocking environments). + + Attributes: + client (AgentaApi): Synchronous client for interacting with the Agenta API. + aclient (AsyncAgentaApi): Asynchronous client for interacting with the Agenta API. + + Notes: + - The class manages both synchronous and asynchronous interactions with the API, allowing users to + select the method that best fits their needs. + - Methods prefixed with 'a' (e.g., aadd, afetch) are designed to be used in asynchronous environments. 
+ """ + + @classmethod + def _parse_fetch_request( + cls, + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + variant_id: Optional[str] = None, + variant_slug: Optional[str] = None, + variant_version: Optional[int] = None, + environment_id: Optional[str] = None, + environment_slug: Optional[str] = None, + environment_version: Optional[int] = None, + ): + if variant_slug and not (app_id or app_slug): + raise ValueError("`variant_slug` requires `app_id` or `app_slug`") + if variant_version and not variant_slug: + raise ValueError("`variant_version` requires `variant_slug`") + if environment_slug and not (app_id or app_slug): + raise ValueError("`environment_slug` requires `app_id` or `app_slug`") + if environment_version and not environment_slug: + raise ValueError("`environment_version` requires `environment_slug`") + + return { + "app_id": app_id, + "app_slug": app_slug, + "variant_id": variant_id, + "variant_slug": variant_slug, + "variant_version": variant_version, + "environment_id": environment_id, + "environment_slug": environment_slug, + "environment_version": environment_version, + } + + @classmethod + def _parse_config_response( + cls, + model: ConfigResponseModel, + ) -> Dict[str, Any]: + flattened: Dict[str, Any] = {} + + # Process application_ref + if model.application_ref: + flattened["app_id"] = model.application_ref.id + flattened["app_slug"] = model.application_ref.slug + + # Process variant_ref + if model.variant_ref: + flattened["variant_id"] = model.variant_ref.id + flattened["variant_slug"] = model.variant_ref.slug + flattened["variant_version"] = model.variant_ref.version + + # Process environment_ref + if model.environment_ref: + flattened["environment_id"] = model.environment_ref.id + flattened["environment_slug"] = model.environment_ref.slug + flattened["environment_version"] = model.environment_ref.version + + # Process variant_lifecycle + if model.variant_lifecycle: + flattened["committed_at"] = 
model.variant_lifecycle.updated_at + flattened["committed_by"] = model.variant_lifecycle.updated_by + flattened["committed_by_id"] = model.variant_lifecycle.updated_by_id + + # Process environment_lifecycle + if model.environment_lifecycle: + flattened["deployed_at"] = model.environment_lifecycle.created_at + flattened["deployed_by"] = model.environment_lifecycle.updated_by + flattened["deployed_by_id"] = model.environment_lifecycle.updated_by_id + + # Add parameters + flattened["params"] = model.params or {} + + return flattened + + @classmethod + def _ref_or_none( + cls, + *, + id: Optional[str] = None, + slug: Optional[str] = None, + version: Optional[int] = None, + ) -> Optional[ReferenceRequestModel]: + if not id and not slug and not version: + return None + + return ReferenceRequestModel(id=id, slug=slug, version=version) + + @classmethod + @handle_exceptions() + def add( + cls, + *, + variant_slug: str, + # + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + ): + config_response = ag.api.variants.configs_add( # type: ignore + variant_ref=SharedManager._ref_or_none( # type: ignore + slug=variant_slug, + version=None, + id=None, + ), + application_ref=SharedManager._ref_or_none( # type: ignore + slug=app_slug, + version=None, + id=app_id, + ), + ) + response = SharedManager._parse_config_response(config_response) + return ConfigurationResponse(**response) + + @classmethod + @handle_exceptions() + async def aadd( + cls, + *, + variant_slug: str, + # + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + ): + config_response = await ag.async_api.variants.configs_add( # type: ignore + variant_ref=SharedManager._ref_or_none( # type: ignore + slug=variant_slug, + version=None, + id=None, + ), + application_ref=SharedManager._ref_or_none( # type: ignore + slug=app_slug, + version=None, + id=app_id, + ), + ) + response = SharedManager._parse_config_response(config_response) + + return ConfigurationResponse(**response) + + @classmethod + 
@handle_exceptions() + def fetch( + cls, + *, + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + variant_id: Optional[str] = None, + variant_slug: Optional[str] = None, + variant_version: Optional[int] = None, + environment_id: Optional[str] = None, + environment_slug: Optional[str] = None, + environment_version: Optional[int] = None, + ) -> ConfigurationResponse: + fetch_signatures = SharedManager._parse_fetch_request( + app_id=app_id, + app_slug=app_slug, + variant_id=variant_id, + variant_slug=variant_slug, + variant_version=variant_version, + environment_id=environment_id, + environment_slug=environment_slug, + environment_version=environment_version, + ) + + config_response = ag.api.variants.configs_fetch( # type: ignore + variant_ref=SharedManager._ref_or_none( # type: ignore + slug=fetch_signatures["variant_slug"], + version=fetch_signatures["variant_version"], + id=fetch_signatures["variant_id"], + ), + environment_ref=SharedManager._ref_or_none( # type: ignore + slug=fetch_signatures["environment_slug"], + version=fetch_signatures["environment_version"], + id=fetch_signatures["environment_id"], + ), + application_ref=SharedManager._ref_or_none( # type: ignore + slug=fetch_signatures["app_slug"], + version=None, + id=fetch_signatures["app_id"], + ), + ) + + response = SharedManager._parse_config_response(config_response) + + return ConfigurationResponse(**response) + + @classmethod + @handle_exceptions() + async def afetch( + cls, + *, + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + variant_id: Optional[str] = None, + variant_slug: Optional[str] = None, + variant_version: Optional[int] = None, + environment_id: Optional[str] = None, + environment_slug: Optional[str] = None, + environment_version: Optional[int] = None, + ): + fetch_signatures = SharedManager._parse_fetch_request( + app_id=app_id, + app_slug=app_slug, + variant_id=variant_id, + variant_slug=variant_slug, + variant_version=variant_version, + 
environment_id=environment_id, + environment_slug=environment_slug, + environment_version=environment_version, + ) + + config_response = await ag.async_api.variants.configs_fetch( # type: ignore + variant_ref=SharedManager._ref_or_none( # type: ignore + slug=fetch_signatures["variant_slug"], + version=fetch_signatures["variant_version"], + id=fetch_signatures["variant_id"], + ), + environment_ref=SharedManager._ref_or_none( # type: ignore + slug=fetch_signatures["environment_slug"], + version=fetch_signatures["environment_version"], + id=fetch_signatures["environment_id"], + ), + application_ref=SharedManager._ref_or_none( # type: ignore + slug=fetch_signatures["app_slug"], + version=None, + id=fetch_signatures["app_id"], + ), + ) + + response = SharedManager._parse_config_response(config_response) + + return ConfigurationResponse(**response) + + @classmethod + @handle_exceptions() + def list( + cls, + *, + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + ): + configs_response = ag.api.variants.configs_list( # type: ignore + application_ref=SharedManager._ref_or_none( # type: ignore # type: ignore + slug=app_slug, + version=None, + id=app_id, + ), + ) # type: ignore + + transformed_response = [ + SharedManager._parse_config_response(config_response) + for config_response in configs_response + ] + + return [ + ConfigurationResponse(**response) # type: ignore + for response in transformed_response + ] + + @classmethod + @handle_exceptions() + async def alist( + cls, + *, + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + ): + configs_response = await ag.async_api.variants.configs_list( # type: ignore + application_ref=SharedManager._ref_or_none( # type: ignore # type: ignore + slug=app_slug, + version=None, + id=app_id, + ), + ) # type: ignore + + transformed_response = [ + SharedManager._parse_config_response(config_response) + for config_response in configs_response + ] + + return [ + ConfigurationResponse(**response) # type: 
ignore + for response in transformed_response + ] + + @classmethod + @handle_exceptions() + def history( + cls, + *, + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + variant_id: Optional[str] = None, + variant_slug: Optional[str] = None, + ): + configs_response = ag.api.variants.configs_history( # type: ignore + variant_ref=SharedManager._ref_or_none( # type: ignore + slug=variant_slug, + version=None, + id=variant_id, + ), + application_ref=SharedManager._ref_or_none( # type: ignore + slug=app_slug, + version=None, + id=app_id, + ), + ) + + transformed_response = [ + SharedManager._parse_config_response(config_response) + for config_response in configs_response + ] + + return [ + ConfigurationResponse(**response) # type: ignore + for response in transformed_response + ] + + @classmethod + @handle_exceptions() + async def ahistory( + cls, + *, + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + variant_id: Optional[str] = None, + variant_slug: Optional[str] = None, + ): + configs_response = await ag.async_api.variants.configs_history( # type: ignore + variant_ref=SharedManager._ref_or_none( # type: ignore + slug=variant_slug, + version=None, + id=variant_id, + ), + application_ref=SharedManager._ref_or_none( # type: ignore + slug=app_slug, + version=None, + id=app_id, + ), + ) + + transformed_response = [ + SharedManager._parse_config_response(config_response) + for config_response in configs_response + ] + + return [ + ConfigurationResponse(**response) # type: ignore + for response in transformed_response + ] + + @classmethod + @handle_exceptions() + def fork( + cls, + *, + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + variant_id: Optional[str] = None, + variant_slug: Optional[str] = None, + variant_version: Optional[int] = None, + environment_id: Optional[str] = None, + environment_slug: Optional[str] = None, + environment_version: Optional[int] = None, + ): + config_response = ag.api.variants.configs_fork( # 
type: ignore + variant_ref=SharedManager._ref_or_none( # type: ignore + slug=variant_slug, + version=variant_version, + id=variant_id, + ), + environment_ref=SharedManager._ref_or_none( # type: ignore + slug=environment_slug, + version=environment_version, + id=environment_id, + ), + application_ref=SharedManager._ref_or_none( # type: ignore + slug=app_slug, + version=None, + id=app_id, + ), + ) + + response = SharedManager._parse_config_response(config_response) + + return ConfigurationResponse(**response) + + @classmethod + @handle_exceptions() + async def afork( + cls, + *, + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + variant_id: Optional[str] = None, + variant_slug: Optional[str] = None, + variant_version: Optional[int] = None, + environment_id: Optional[str] = None, + environment_slug: Optional[str] = None, + environment_version: Optional[int] = None, + ): + config_response = await ag.async_api.variants.configs_fork( # type: ignore + variant_ref=SharedManager._ref_or_none( # type: ignore + slug=variant_slug, + version=variant_version, + id=variant_id, + ), + environment_ref=SharedManager._ref_or_none( # type: ignore + slug=environment_slug, + version=environment_version, + id=environment_id, + ), + application_ref=SharedManager._ref_or_none( # type: ignore + slug=app_slug, + version=None, + id=app_id, + ), + ) + + response = SharedManager._parse_config_response(config_response) + return ConfigurationResponse(**response) + + @classmethod + @handle_exceptions() + def commit( + cls, + *, + parameters: dict, + variant_slug: str, + # + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + ): + variant_ref = SharedManager._ref_or_none( # type: ignore # type: ignore + slug=variant_slug, + version=None, + id=None, + ) + application_ref = SharedManager._ref_or_none( # type: ignore # type: ignore + slug=app_slug, + version=None, + id=app_id, + ) + config_response = ag.api.variants.configs_commit( # type: ignore + config=ConfigRequest( 
+ params=parameters, + variant_ref=variant_ref.model_dump() if variant_ref else None, # type: ignore + application_ref=application_ref.model_dump() if application_ref else None, # type: ignore + ) + ) + + response = SharedManager._parse_config_response(config_response) + + return ConfigurationResponse(**response) + + @classmethod + @handle_exceptions() + async def acommit( + cls, + *, + parameters: dict, + variant_slug: str, + # + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + ): + config_response = await ag.async_api.variants.configs_commit( # type: ignore + config=ConfigRequest( + params=parameters, + variant_ref=SharedManager._ref_or_none( # type: ignore # type: ignore + slug=variant_slug, + version=None, + id=None, + ), + application_ref=SharedManager._ref_or_none( # type: ignore # type: ignore + slug=app_slug, + version=None, + id=app_id, + ), + ) + ) + + response = SharedManager._parse_config_response(config_response) + + return ConfigurationResponse(**response) + + @classmethod + @handle_exceptions() + def deploy( + cls, + *, + variant_slug: str, + environment_slug: str, + # + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + variant_version: Optional[int] = None, + ): + config_response = ag.api.variants.configs_deploy( # type: ignore + variant_ref=SharedManager._ref_or_none( # type: ignore + slug=variant_slug, + version=variant_version, + id=None, + ), + environment_ref=SharedManager._ref_or_none( # type: ignore + slug=environment_slug, + version=None, + id=None, + ), + application_ref=SharedManager._ref_or_none( # type: ignore + slug=app_slug, + version=None, + id=app_id, + ), + ) + + response = SharedManager._parse_config_response(config_response) + + return DeploymentResponse(**response) + + @classmethod + @handle_exceptions() + async def adeploy( + cls, + *, + variant_slug: str, + environment_slug: str, + # + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + variant_version: Optional[int] = None, + ): + 
config_response = await ag.async_api.variants.configs_deploy( # type: ignore + variant_ref=SharedManager._ref_or_none( # type: ignore + slug=variant_slug, + version=variant_version, + id=None, + ), + environment_ref=SharedManager._ref_or_none( # type: ignore + slug=environment_slug, + version=None, + id=None, + ), + application_ref=SharedManager._ref_or_none( # type: ignore + slug=app_slug, + version=None, + id=app_id, + ), + ) + + response = SharedManager._parse_config_response(config_response) + + return DeploymentResponse(**response) + + @classmethod + @handle_exceptions() + def delete( + cls, + *, + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + variant_id: Optional[str] = None, + variant_slug: Optional[str] = None, + variant_version: Optional[int] = None, + ): + config_response = ag.api.variants.configs_delete( # type: ignore + variant_ref=SharedManager._ref_or_none( # type: ignore + slug=variant_slug, + version=variant_version, + id=variant_id, + ), + application_ref=SharedManager._ref_or_none( # type: ignore + slug=app_slug, + version=None, + id=app_id, + ), + ) # type: ignore + + return config_response + + @classmethod + @handle_exceptions() + async def adelete( + cls, + *, + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + variant_id: Optional[str] = None, + variant_slug: Optional[str] = None, + variant_version: Optional[int] = None, + ): + config_response = await ag.async_api.variants.configs_delete( # type: ignore + variant_ref=SharedManager._ref_or_none( # type: ignore + slug=variant_slug, + version=variant_version, + id=variant_id, + ), + application_ref=SharedManager._ref_or_none( # type: ignore + slug=app_slug, + version=None, + id=app_id, + ), + ) # type: ignore + + return config_response diff --git a/services/completion-stateless-sdk/agenta/sdk/managers/variant.py b/services/completion-stateless-sdk/agenta/sdk/managers/variant.py new file mode 100644 index 0000000000..8df15eac47 --- /dev/null +++ 
b/services/completion-stateless-sdk/agenta/sdk/managers/variant.py @@ -0,0 +1,182 @@ +from typing import Optional + +from agenta.sdk.managers.shared import SharedManager + + +class VariantManager(SharedManager): + @classmethod + def create( + cls, + *, + parameters: dict, + variant_slug: str, + # + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + ): + variant = SharedManager.add( + app_id=app_id, + app_slug=app_slug, + variant_slug=variant_slug, + ) + + if variant: + variant = SharedManager.commit( + parameters=parameters, + app_id=app_id, + app_slug=app_slug, + variant_slug=variant_slug, + ) + + return variant + + @classmethod + async def acreate( + cls, + *, + parameters: dict, + variant_slug: str, + # + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + ): + variant = await SharedManager.aadd( + app_id=app_id, + app_slug=app_slug, + variant_slug=variant_slug, + ) + if variant: + variant = await SharedManager.acommit( + parameters=parameters, + app_id=app_id, + app_slug=app_slug, + variant_slug=variant_slug, + ) + + return variant + + @classmethod + def commit( + cls, + *, + parameters: dict, + variant_slug: str, + # + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + ): + variant = SharedManager.commit( + parameters=parameters, + app_id=app_id, + app_slug=app_slug, + variant_slug=variant_slug, + ) + return variant + + @classmethod + async def acommit( + cls, + *, + parameters: dict, + variant_slug: str, + # + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + ): + variant = await SharedManager.acommit( + parameters=parameters, + app_id=app_id, + app_slug=app_slug, + variant_slug=variant_slug, + ) + return variant + + @classmethod + def delete( + cls, + *, + variant_slug: str, + # + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + ): + message = SharedManager.delete( + app_id=app_id, + app_slug=app_slug, + variant_slug=variant_slug, + ) + return message + + @classmethod + async def 
adelete( + cls, + *, + variant_slug: str, + # + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + ): + message = await SharedManager.adelete( + app_id=app_id, + app_slug=app_slug, + variant_slug=variant_slug, + ) + return message + + @classmethod + def list( + cls, + *, + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + ): + variants = SharedManager.list( + app_id=app_id, + app_slug=app_slug, + ) + return variants + + @classmethod + async def alist( + cls, + *, + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + ): + variants = await SharedManager.alist( + app_id=app_id, + app_slug=app_slug, + ) + return variants + + @classmethod + def history( + cls, + *, + variant_slug: str, + # + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + ): + variants = SharedManager.history( + app_id=app_id, + app_slug=app_slug, + variant_slug=variant_slug, + ) + return variants + + @classmethod + async def ahistory( + cls, + *, + variant_slug: str, + # + app_id: Optional[str] = None, + app_slug: Optional[str] = None, + ): + variants = await SharedManager.ahistory( + app_id=app_id, + app_slug=app_slug, + variant_slug=variant_slug, + ) + return variants diff --git a/services/completion-stateless-sdk/agenta/sdk/managers/vault.py b/services/completion-stateless-sdk/agenta/sdk/managers/vault.py new file mode 100644 index 0000000000..f559af19d2 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/managers/vault.py @@ -0,0 +1,16 @@ +from typing import Optional, Dict, Any + +from agenta.sdk.context.routing import routing_context + + +class VaultManager: + @staticmethod + def get_from_route() -> Optional[Dict[str, Any]]: + context = routing_context.get() + + secrets = context.secrets + + if not secrets: + return None + + return secrets diff --git a/services/completion-stateless-sdk/agenta/sdk/middleware/__init__.py b/services/completion-stateless-sdk/agenta/sdk/middleware/__init__.py new file mode 100644 index 
0000000000..e69de29bb2 diff --git a/services/completion-stateless-sdk/agenta/sdk/middleware/auth.py b/services/completion-stateless-sdk/agenta/sdk/middleware/auth.py new file mode 100644 index 0000000000..fd82198d05 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/middleware/auth.py @@ -0,0 +1,180 @@ +from typing import Callable, Optional + +from os import getenv +from json import dumps + +import httpx +from starlette.middleware.base import BaseHTTPMiddleware +from fastapi import FastAPI, Request +from fastapi.responses import JSONResponse + +from agenta.sdk.middleware.cache import TTLLRUCache, CACHE_CAPACITY, CACHE_TTL +from agenta.sdk.utils.constants import TRUTHY +from agenta.sdk.utils.exceptions import display_exception + +import agenta as ag + + +_SHARED_SERVICE = getenv("AGENTA_SHARED_SERVICE", "false").lower() in TRUTHY +_CACHE_ENABLED = getenv("AGENTA_MIDDLEWARE_CACHE_ENABLED", "true").lower() in TRUTHY +_UNAUTHORIZED_ALLOWED = ( + getenv("AGENTA_UNAUTHORIZED_EXECUTION_ALLOWED", "false").lower() in TRUTHY +) +_ALWAYS_ALLOW_LIST = ["/health"] + +_cache = TTLLRUCache(capacity=CACHE_CAPACITY, ttl=CACHE_TTL) + + +class DenyResponse(JSONResponse): + def __init__( + self, + status_code: int = 401, + detail: str = "Unauthorized", + ) -> None: + super().__init__( + status_code=status_code, + content={"detail": detail}, + ) + + +class DenyException(Exception): + def __init__( + self, + status_code: int = 401, + content: str = "Unauthorized", + ) -> None: + super().__init__() + + self.status_code = status_code + self.content = content + + +class AuthMiddleware(BaseHTTPMiddleware): + def __init__(self, app: FastAPI): + super().__init__(app) + + self.host = ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.host + self.resource_id = ( + ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.service_id + if not _SHARED_SERVICE + else None + ) + + async def dispatch(self, request: Request, call_next: Callable): + try: + if _UNAUTHORIZED_ALLOWED or request.url.path in _ALWAYS_ALLOW_LIST: 
+ request.state.auth = {} + + else: + credentials = await self._get_credentials(request) + + request.state.auth = {"credentials": credentials} + + return await call_next(request) + + except DenyException as deny: + display_exception("Auth Middleware Exception") + + return DenyResponse( + status_code=deny.status_code, + detail=deny.content, + ) + + except: # pylint: disable=bare-except + display_exception("Auth Middleware Exception") + + return DenyResponse( + status_code=500, + detail="Auth: Unexpected Error.", + ) + + async def _get_credentials(self, request: Request) -> Optional[str]: + try: + authorization = request.headers.get("authorization", None) + + headers = {"Authorization": authorization} if authorization else None + + access_token = request.cookies.get("sAccessToken", None) + + cookies = {"sAccessToken": access_token} if access_token else None + + baggage = request.state.otel.get("baggage") if request.state.otel else {} + + project_id = ( + # CLEANEST + baggage.get("project_id") + # ALTERNATIVE + or request.query_params.get("project_id") + ) + + params = {"action": "run_service", "resource_type": "service"} + + if self.resource_id: + params["resource_id"] = self.resource_id + + if project_id: + params["project_id"] = project_id + + _hash = dumps( + { + "headers": headers, + "cookies": cookies, + "params": params, + }, + sort_keys=True, + ) + + if _CACHE_ENABLED: + credentials = _cache.get(_hash) + + if credentials: + return credentials + + async with httpx.AsyncClient() as client: + response = await client.get( + f"{self.host}/api/permissions/verify", + headers=headers, + cookies=cookies, + params=params, + ) + + if response.status_code == 401: + raise DenyException( + status_code=401, + content="Invalid credentials", + ) + elif response.status_code == 403: + raise DenyException( + status_code=403, + content="Service execution not allowed.", + ) + elif response.status_code != 200: + raise DenyException( + status_code=400, + content="Auth: Unexpected 
Error.", + ) + + auth = response.json() + + if auth.get("effect") != "allow": + raise DenyException( + status_code=403, + content="Service execution not allowed.", + ) + + credentials = auth.get("credentials") + + _cache.put(_hash, credentials) + + return credentials + + except DenyException as deny: + raise deny + + except Exception as exc: # pylint: disable=bare-except + display_exception("Auth Middleware Exception (suppressed)") + + raise DenyException( + status_code=500, + content="Auth: Unexpected Error.", + ) from exc diff --git a/services/completion-stateless-sdk/agenta/sdk/middleware/cache.py b/services/completion-stateless-sdk/agenta/sdk/middleware/cache.py new file mode 100644 index 0000000000..641f4f802d --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/middleware/cache.py @@ -0,0 +1,47 @@ +from os import getenv +from time import time +from collections import OrderedDict + +CACHE_CAPACITY = int(getenv("AGENTA_MIDDLEWARE_CACHE_CAPACITY", "512")) +CACHE_TTL = int(getenv("AGENTA_MIDDLEWARE_CACHE_TTL", str(5 * 60))) # 5 minutes + + +class TTLLRUCache: + def __init__(self, capacity: int, ttl: int): + self.cache = OrderedDict() + self.capacity = capacity + self.ttl = ttl + + def get(self, key): + # CACHE + if key not in self.cache: + return None + + value, expiry = self.cache[key] + # ----- + + # TTL + if time() > expiry: + del self.cache[key] + + return None + # --- + + # LRU + self.cache.move_to_end(key) + # --- + + return value + + def put(self, key, value): + # CACHE + if key in self.cache: + del self.cache[key] + # CACHE & LRU + elif len(self.cache) >= self.capacity: + self.cache.popitem(last=False) + # ----------- + + # TTL + self.cache[key] = (value, time() + self.ttl) + # --- diff --git a/services/completion-stateless-sdk/agenta/sdk/middleware/config.py b/services/completion-stateless-sdk/agenta/sdk/middleware/config.py new file mode 100644 index 0000000000..8ea9eb9ffe --- /dev/null +++ 
b/services/completion-stateless-sdk/agenta/sdk/middleware/config.py @@ -0,0 +1,254 @@ +from typing import Callable, Optional, Tuple, Dict + +from os import getenv +from json import dumps + +from pydantic import BaseModel + +from starlette.middleware.base import BaseHTTPMiddleware +from fastapi import Request, FastAPI + +import httpx + +from agenta.sdk.middleware.cache import TTLLRUCache, CACHE_CAPACITY, CACHE_TTL +from agenta.sdk.utils.constants import TRUTHY +from agenta.sdk.utils.exceptions import suppress + +import agenta as ag + + +_CACHE_ENABLED = getenv("AGENTA_MIDDLEWARE_CACHE_ENABLED", "true").lower() in TRUTHY + +_cache = TTLLRUCache(capacity=CACHE_CAPACITY, ttl=CACHE_TTL) + + +class Reference(BaseModel): + id: Optional[str] = None + slug: Optional[str] = None + version: Optional[str] = None + + +class ConfigMiddleware(BaseHTTPMiddleware): + def __init__(self, app: FastAPI): + super().__init__(app) + + self.host = ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.host + self.application_id = ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.app_id + + async def dispatch( + self, + request: Request, + call_next: Callable, + ): + request.state.config = {} + + with suppress(): + parameters, references = await self._get_config(request) + + request.state.config = { + "parameters": parameters, + "references": references, + } + + return await call_next(request) + + # @atimeit + async def _get_config(self, request: Request) -> Optional[Tuple[Dict, Dict]]: + credentials = request.state.auth.get("credentials") + + headers = None + if credentials: + headers = {"Authorization": credentials} + + application_ref = await self._parse_application_ref(request) + variant_ref = await self._parse_variant_ref(request) + environment_ref = await self._parse_environment_ref(request) + + refs = {} + if application_ref: + refs["application_ref"] = application_ref.model_dump() + if variant_ref: + refs["variant_ref"] = variant_ref.model_dump() + if environment_ref: + refs["environment_ref"] = 
environment_ref.model_dump() + + if not refs: + return None, None + + _hash = dumps( + { + "headers": headers, + "refs": refs, + }, + sort_keys=True, + ) + + if _CACHE_ENABLED: + config_cache = _cache.get(_hash) + + if config_cache: + parameters = config_cache.get("parameters") + references = config_cache.get("references") + + return parameters, references + + config = None + async with httpx.AsyncClient() as client: + response = await client.post( + f"{self.host}/api/variants/configs/fetch", + headers=headers, + json=refs, + ) + + if response.status_code != 200: + return None, None + + config = response.json() + + if not config: + _cache.put(_hash, {"parameters": None, "references": None}) + + return None, None + + parameters = config.get("params") + + references = {} + + for ref_key in ["application_ref", "variant_ref", "environment_ref"]: + refs = config.get(ref_key) + ref_prefix = ref_key.split("_", maxsplit=1)[0] + + for ref_part_key in ["id", "slug", "version"]: + ref_part = refs.get(ref_part_key) + + if ref_part: + references[ref_prefix + "." 
+ ref_part_key] = ref_part + + _cache.put(_hash, {"parameters": parameters, "references": references}) + + return parameters, references + + async def _parse_application_ref( + self, + request: Request, + ) -> Optional[Reference]: + baggage = request.state.otel.get("baggage") if request.state.otel else {} + + body = {} + try: + body = await request.json() + except: # pylint: disable=bare-except + pass + + application_id = ( + # CLEANEST + baggage.get("application_id") + # ALTERNATIVE + or request.query_params.get("application_id") + # LEGACY + or request.query_params.get("app_id") + or self.application_id + ) + application_slug = ( + # CLEANEST + baggage.get("application_slug") + # ALTERNATIVE + or request.query_params.get("application_slug") + # LEGACY + or request.query_params.get("app_slug") + or body.get("app") + ) + + if not any([application_id, application_slug]): + return None + + return Reference( + id=application_id, + slug=application_slug, + ) + + async def _parse_variant_ref( + self, + request: Request, + ) -> Optional[Reference]: + baggage = request.state.otel.get("baggage") if request.state.otel else {} + + body = {} + try: + body = await request.json() + except: # pylint: disable=bare-except + pass + + variant_id = ( + # CLEANEST + baggage.get("variant_id") + # ALTERNATIVE + or request.query_params.get("variant_id") + ) + variant_slug = ( + # CLEANEST + baggage.get("variant_slug") + # ALTERNATIVE + or request.query_params.get("variant_slug") + # LEGACY + or request.query_params.get("config") + or body.get("config") + ) + variant_version = ( + # CLEANEST + baggage.get("variant_version") + # ALTERNATIVE + or request.query_params.get("variant_version") + ) + + if not any([variant_id, variant_slug, variant_version]): + return None + + return Reference( + id=variant_id, + slug=variant_slug, + version=variant_version, + ) + + async def _parse_environment_ref( + self, + request: Request, + ) -> Optional[Reference]: + baggage = 
request.state.otel.get("baggage") if request.state.otel else {} + + body = {} + try: + body = await request.json() + except: # pylint: disable=bare-except + pass + + environment_id = ( + # CLEANEST + baggage.get("environment_id") + # ALTERNATIVE + or request.query_params.get("environment_id") + ) + environment_slug = ( + # CLEANEST + baggage.get("environment_slug") + # ALTERNATIVE + or request.query_params.get("environment_slug") + # LEGACY + or request.query_params.get("environment") + or body.get("environment") + ) + environment_version = ( + # CLEANEST + baggage.get("environment_version") + # ALTERNATIVE + or request.query_params.get("environment_version") + ) + + if not any([environment_id, environment_slug, environment_version]): + return None + + return Reference( + id=environment_id, + slug=environment_slug, + version=environment_version, + ) diff --git a/services/completion-stateless-sdk/agenta/sdk/middleware/cors.py b/services/completion-stateless-sdk/agenta/sdk/middleware/cors.py new file mode 100644 index 0000000000..80f0a30fc5 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/middleware/cors.py @@ -0,0 +1,27 @@ +from os import getenv + +from starlette.types import ASGIApp, Receive, Scope, Send +from fastapi.middleware.cors import CORSMiddleware as _CORSMiddleware + +_TRUTHY = {"true", "1", "t", "y", "yes", "on", "enable", "enabled"} +_USE_CORS = getenv("AGENTA_USE_CORS", "enable").lower() in _TRUTHY + + +class CORSMiddleware(_CORSMiddleware): + def __init__(self, app: ASGIApp): + if _USE_CORS: + super().__init__( + app=app, + allow_origins=["*"], + allow_methods=["*"], + allow_headers=["*"], + allow_credentials=True, + expose_headers=None, + max_age=None, + ) + + async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: + if _USE_CORS: + return await super().__call__(scope, receive, send) + + return await self.app(scope, receive, send) diff --git a/services/completion-stateless-sdk/agenta/sdk/middleware/otel.py 
b/services/completion-stateless-sdk/agenta/sdk/middleware/otel.py new file mode 100644 index 0000000000..0a6396f979 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/middleware/otel.py @@ -0,0 +1,40 @@ +from typing import Callable + +from starlette.middleware.base import BaseHTTPMiddleware +from fastapi import Request, FastAPI + +from opentelemetry.baggage.propagation import W3CBaggagePropagator + +from agenta.sdk.utils.exceptions import suppress + + +class OTelMiddleware(BaseHTTPMiddleware): + def __init__(self, app: FastAPI): + super().__init__(app) + + async def dispatch(self, request: Request, call_next: Callable): + request.state.otel = {} + + with suppress(): + baggage = await self._get_baggage(request) + + request.state.otel = {"baggage": baggage} + + return await call_next(request) + + async def _get_baggage( + self, + request, + ): + _baggage = {"baggage": request.headers.get("Baggage", "")} + + context = W3CBaggagePropagator().extract(_baggage) + + baggage = {} + + if context: + for partial in context.values(): + for key, value in partial.items(): + baggage[key] = value + + return baggage diff --git a/services/completion-stateless-sdk/agenta/sdk/middleware/vault.py b/services/completion-stateless-sdk/agenta/sdk/middleware/vault.py new file mode 100644 index 0000000000..c7b6a8877f --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/middleware/vault.py @@ -0,0 +1,158 @@ +from typing import Callable, Dict, Optional + +from enum import Enum +from os import getenv +from json import dumps + +from pydantic import BaseModel + +import httpx +from starlette.middleware.base import BaseHTTPMiddleware +from fastapi import FastAPI, Request + +from agenta.sdk.middleware.cache import TTLLRUCache, CACHE_CAPACITY, CACHE_TTL +from agenta.sdk.utils.constants import TRUTHY +from agenta.sdk.utils.exceptions import suppress, display_exception + +import agenta as ag + + +# TODO: Move to backend client types +class SecretKind(str, Enum): + PROVIDER_KEY 
= "provider_key" + + +# TODO: Move to backend client types +class ProviderKind(str, Enum): + OPENAI = "openai" + COHERE = "cohere" + ANYSCALE = "anyscale" + DEEPINFRA = "deepinfra" + ALEPHALPHA = "alephalpha" + GROQ = "groq" + MISTRALAI = "mistralai" + ANTHROPIC = "anthropic" + PERPLEXITYAI = "perplexityai" + TOGETHERAI = "togetherai" + OPENROUTER = "openrouter" + GEMINI = "gemini" + + +# TODO: Move to backend client types +class ProviderKeyDTO(BaseModel): + provider: ProviderKind + key: str + + +# TODO: Move to backend client types +class SecretDTO(BaseModel): + kind: SecretKind = "provider_key" + data: ProviderKeyDTO + + +_CACHE_ENABLED = getenv("AGENTA_MIDDLEWARE_CACHE_ENABLED", "true").lower() in TRUTHY + +_cache = TTLLRUCache(capacity=CACHE_CAPACITY, ttl=CACHE_TTL) + + +class VaultMiddleware(BaseHTTPMiddleware): + def __init__(self, app: FastAPI): + super().__init__(app) + + self.host = ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.host + + async def dispatch( + self, + request: Request, + call_next: Callable, + ): + request.state.vault = {} + + with suppress(): + secrets = await self._get_secrets(request) + + request.state.vault = {"secrets": secrets} + + return await call_next(request) + + async def _get_secrets(self, request: Request) -> Optional[Dict]: + credentials = request.state.auth.get("credentials") + + headers = None + if credentials: + headers = {"Authorization": credentials} + + _hash = dumps( + { + "headers": headers, + }, + sort_keys=True, + ) + + if _CACHE_ENABLED: + secrets_cache = _cache.get(_hash) + + if secrets_cache: + secrets = secrets_cache.get("secrets") + + return secrets + + local_secrets = [] + + try: + for provider_kind in ProviderKind: + provider = provider_kind.value + key_name = f"{provider.upper()}_API_KEY" + key = getenv(key_name) + + if not key: + continue + + secret = SecretDTO( + kind=SecretKind.PROVIDER_KEY, + data=ProviderKeyDTO( + provider=provider, + key=key, + ), + ) + + local_secrets.append(secret.model_dump()) + except: # 
pylint: disable=bare-except + display_exception("Vault: Local Secrets Exception") + + vault_secrets = [] + + try: + async with httpx.AsyncClient() as client: + response = await client.get( + f"{self.host}/api/vault/v1/secrets", + headers=headers, + ) + + if response.status_code != 200: + vault_secrets = [] + + else: + vault = response.json() + + vault_secrets = vault.get("secrets") + except: # pylint: disable=bare-except + display_exception("Vault: Vault Secrets Exception") + + merged_secrets = {} + + if local_secrets: + for secret in local_secrets: + provider = secret["data"]["provider"] + merged_secrets[provider] = secret + + if vault_secrets: + for secret in vault_secrets: + provider = secret["data"]["provider"] + merged_secrets[provider] = secret + + secrets = list(merged_secrets.values()) + + _cache.put(_hash, {"secrets": secrets}) + + return secrets diff --git a/services/completion-stateless-sdk/agenta/sdk/router.py b/services/completion-stateless-sdk/agenta/sdk/router.py new file mode 100644 index 0000000000..b4cb63b59b --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/router.py @@ -0,0 +1,8 @@ +from fastapi import APIRouter + +router = APIRouter() + + +@router.get("/health") +def health(): + return {"status": "ok"} diff --git a/services/completion-stateless-sdk/agenta/sdk/tracing/__init__.py b/services/completion-stateless-sdk/agenta/sdk/tracing/__init__.py new file mode 100644 index 0000000000..734c38b64d --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/tracing/__init__.py @@ -0,0 +1 @@ +from .tracing import Tracing, get_tracer diff --git a/services/completion-stateless-sdk/agenta/sdk/tracing/attributes.py b/services/completion-stateless-sdk/agenta/sdk/tracing/attributes.py new file mode 100644 index 0000000000..5cf4adbff1 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/tracing/attributes.py @@ -0,0 +1,141 @@ +from json import loads, dumps +from typing import Optional, Union, Sequence, Any, Dict + +Primitive 
= Union[str, int, float, bool, bytes] +PrimitivesSequence = Sequence[Primitive] +Attribute = Union[Primitive, PrimitivesSequence] + + +def _marshal( + unmarshalled: Dict[str, Any], + *, + parent_key: Optional[str] = "", + depth: Optional[int] = 0, + max_depth: Optional[int] = None, +) -> Dict[str, Any]: + """ + Marshals a dictionary of unmarshalled attributes into a flat dictionary + + Example: + unmarshalled = { + "ag": { + "type": "tree", + "node": { + "name": "root", + "children": [ + { + "name": "child1", + }, + { + "name": "child2", + } + ] + } + } + } + marshalled = { + "ag.type": "tree", + "ag.node.name": "root", + "ag.node.children.0.name": "child1", + "ag.node.children.1.name": "child2" + } + """ + marshalled = {} + + # If max_depth is set and we've reached it, + # just return the unmarshalled attributes + if max_depth is not None and depth >= max_depth: + marshalled[parent_key] = unmarshalled + # MISSING ENCODING TO JSON IF NOT PRIMITIVE + + return marshalled + + # Otherwise, + # iterate over the unmarshalled attributes and marshall them + for key, value in unmarshalled.items(): + child_key = f"{parent_key}.{key}" if parent_key else key + + if isinstance(value, dict): + dict_key = child_key + + marshalled.update( + _marshal( + value, + parent_key=dict_key, + depth=depth + 1, + max_depth=max_depth, + ) + ) + elif isinstance(value, list): + if max_depth is not None and depth + 1 >= max_depth: + marshalled[child_key] = value + # MISSING ENCODING TO JSON IF NOT PRIMITIVE + else: + for i, item in enumerate(value): + list_key = f"{child_key}.{i}" + + if isinstance(item, (dict, list)): + marshalled.update( + _marshal( + item, + parent_key=list_key, + depth=depth + 1, + max_depth=max_depth, + ) + ) + else: + marshalled[list_key] = item + # MISSING ENCODING TO JSON IF NOT PRIMITIVE + else: + marshalled[child_key] = value + # MISSING ENCODING TO JSON IF NOT PRIMITIVE + + return marshalled + + +def _encode_key( + namespace: Optional[str] = None, + key: str = "", +) 
-> str: + if namespace is None: + return key + + return f"ag.{namespace}.{key}" + + +def _encode_value( + value: Any, +) -> Optional[Attribute]: + if value is None: + return None + + if isinstance(value, (str, int, float, bool, bytes)): + return value + + if isinstance(value, dict) or isinstance(value, list): + encoded = dumps(value) + value = "@ag.type=json:" + encoded + return value + + return repr(value) + + +def serialize( + *, + namespace: str, + attributes: Dict[str, Any], + max_depth: Optional[int] = None, +) -> Dict[str, str]: + if not isinstance(attributes, dict): + return {} + + _attributes = { + k: v + for k, v in { + _encode_key(namespace, key): _encode_value(value) + for key, value in _marshal(attributes, max_depth=max_depth).items() + }.items() + if v is not None + } + + return _attributes diff --git a/services/completion-stateless-sdk/agenta/sdk/tracing/conventions.py b/services/completion-stateless-sdk/agenta/sdk/tracing/conventions.py new file mode 100644 index 0000000000..018cf64dea --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/tracing/conventions.py @@ -0,0 +1,49 @@ +from enum import Enum +from re import fullmatch + +from opentelemetry.trace import SpanKind + + +class Reference(str, Enum): + # + VARIANT_ID = "variant.id" + VARIANT_SLUG = "variant.slug" + VARIANT_VERSION = "variant.version" + # + ENVIRONMENT_ID = "environment.id" + ENVIRONMENT_SLUG = "environment.slug" + ENVIRONMENT_VERSION = "environment.version" + # + APPLICATION_ID = "application.id" + APPLICATION_SLUG = "application.slug" + # + + +_PATTERN = r"[A-Za-z0-9._-]+" + + +def is_valid_attribute_key( + string: str, +): + return bool(fullmatch(_PATTERN, string)) + + +def parse_span_kind(type: str) -> SpanKind: + kind = SpanKind.INTERNAL + if type in [ + "agent", + "chain", + "workflow", + ]: + kind = SpanKind.SERVER + elif type in [ + "tool", + "embedding", + "query", + "completion", + "chat", + "rerank", + ]: + kind = SpanKind.CLIENT + + return kind diff --git 
a/services/completion-stateless-sdk/agenta/sdk/tracing/exporters.py b/services/completion-stateless-sdk/agenta/sdk/tracing/exporters.py new file mode 100644 index 0000000000..7a38201d5a --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/tracing/exporters.py @@ -0,0 +1,103 @@ +from typing import Sequence, Dict, List + +from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter +from opentelemetry.sdk.trace.export import ( + ConsoleSpanExporter, + SpanExporter, + SpanExportResult, + ReadableSpan, +) + +from agenta.sdk.utils.exceptions import suppress +from agenta.sdk.context.exporting import ( + exporting_context_manager, + exporting_context, + ExportingContext, +) + + +class InlineTraceExporter(SpanExporter): + def __init__(self, registry: Dict[str, List[ReadableSpan]]): + self._shutdown = False + self._registry = registry + + def export( + self, + spans: Sequence[ReadableSpan], + ) -> SpanExportResult: + if self._shutdown: + return + + with suppress(): + for span in spans: + trace_id = span.get_span_context().trace_id + + if trace_id not in self._registry: + self._registry[trace_id] = [] + + self._registry[trace_id].append(span) + + def shutdown(self) -> None: + self._shutdown = True + + def force_flush(self, timeout_millis: int = 30000) -> bool: + return True + + def is_ready( + self, + trace_id: int, + ) -> bool: + is_ready = trace_id in self._registry + + return is_ready + + def fetch( + self, + trace_id: int, + ) -> List[ReadableSpan]: + trace = self._registry.get(trace_id, []) + + if trace_id in self._registry: + del self._registry[trace_id] + + return trace + + +class OTLPExporter(OTLPSpanExporter): + _MAX_RETRY_TIMEOUT = 2 + + def __init__(self, *args, credentials: Dict[int, str] = None, **kwargs): + super().__init__(*args, **kwargs) + + self.credentials = credentials + + def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult: + credentials = None + + if self.credentials: + trace_ids = 
set(span.get_span_context().trace_id for span in spans) + + if len(trace_ids) == 1: + trace_id = trace_ids.pop() + + if trace_id in self.credentials: + credentials = self.credentials.pop(trace_id) + + with exporting_context_manager( + context=ExportingContext( + credentials=credentials, + ) + ): + return super().export(spans) + + def _export(self, serialized_data: bytes): + credentials = exporting_context.get().credentials + + if credentials: + self._session.headers.update({"Authorization": credentials}) + + return super()._export(serialized_data) + + +ConsoleExporter = ConsoleSpanExporter +InlineExporter = InlineTraceExporter diff --git a/services/completion-stateless-sdk/agenta/sdk/tracing/inline.py b/services/completion-stateless-sdk/agenta/sdk/tracing/inline.py new file mode 100644 index 0000000000..3bf55cdf82 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/tracing/inline.py @@ -0,0 +1,1146 @@ +############################ +### services.shared.dtos ### +### -------------------- ### + +from typing import Optional + +from pydantic import BaseModel +from uuid import UUID +from datetime import datetime +from enum import Enum +from collections import OrderedDict + + +class ProjectScopeDTO(BaseModel): + project_id: UUID + + +class LifecycleDTO(BaseModel): + created_at: datetime + updated_at: Optional[datetime] = None + + updated_by_id: Optional[UUID] = None + + +### -------------------- ### +### services.shared.dtos ### +############################ + + +################################### +### services.observability.dtos ### +### --------------------------- ### + +from typing import List, Dict, Any, Union, Optional + +from enum import Enum +from datetime import datetime +from uuid import UUID + + +class TimeDTO(BaseModel): + start: datetime + end: datetime + + +class StatusCode(Enum): + UNSET = "UNSET" + OK = "OK" + ERROR = "ERROR" + + +class StatusDTO(BaseModel): + code: StatusCode + message: Optional[str] = None + stacktrace: Optional[str] = None 
+ + +AttributeValueType = Any +Attributes = Dict[str, AttributeValueType] + + +class TreeType(Enum): + # --- VARIANTS --- # + INVOCATION = "invocation" + # --- VARIANTS --- # + + +class NodeType(Enum): + # --- VARIANTS --- # + ## SPAN_KIND_SERVER + AGENT = "agent" + WORKFLOW = "workflow" + CHAIN = "chain" + ## SPAN_KIND_INTERNAL + TASK = "task" + ## SPAN_KIND_CLIENT + TOOL = "tool" + EMBEDDING = "embedding" + QUERY = "query" + COMPLETION = "completion" + CHAT = "chat" + RERANK = "rerank" + # --- VARIANTS --- # + + +class RootDTO(BaseModel): + id: UUID + + +class TreeDTO(BaseModel): + id: UUID + type: Optional[TreeType] = None + + +class NodeDTO(BaseModel): + id: UUID + type: Optional[NodeType] = None + name: str + + +Data = Dict[str, Any] +Metrics = Dict[str, Any] +Metadata = Dict[str, Any] +Tags = Dict[str, Any] +Refs = Dict[str, Any] + + +class LinkDTO(BaseModel): + type: str + id: UUID + tree_id: Optional[UUID] = None + + +class ParentDTO(BaseModel): + id: UUID + + +class OTelSpanKind(Enum): + SPAN_KIND_UNSPECIFIED = "SPAN_KIND_UNSPECIFIED" + # INTERNAL + SPAN_KIND_INTERNAL = "SPAN_KIND_INTERNAL" + # SYNCHRONOUS + SPAN_KIND_SERVER = "SPAN_KIND_SERVER" + SPAN_KIND_CLIENT = "SPAN_KIND_CLIENT" + # ASYNCHRONOUS + SPAN_KIND_PRODUCER = "SPAN_KIND_PRODUCER" + SPAN_KIND_CONSUMER = "SPAN_KIND_CONSUMER" + + +class OTelStatusCode(Enum): + STATUS_CODE_OK = "STATUS_CODE_OK" + STATUS_CODE_ERROR = "STATUS_CODE_ERROR" + STATUS_CODE_UNSET = "STATUS_CODE_UNSET" + + +class OTelContextDTO(BaseModel): + trace_id: str + span_id: str + + +class OTelEventDTO(BaseModel): + name: str + timestamp: datetime + + attributes: Optional[Attributes] = None + + +class OTelLinkDTO(BaseModel): + context: OTelContextDTO + + attributes: Optional[Attributes] = None + + +class OTelExtraDTO(BaseModel): + kind: Optional[str] = None + + attributes: Optional[Attributes] = None + events: Optional[List[OTelEventDTO]] = None + links: Optional[List[OTelLinkDTO]] = None + + +class SpanDTO(BaseModel): + scope: 
Optional[ProjectScopeDTO] = None + + lifecycle: Optional[LifecycleDTO] = None + + root: RootDTO + tree: TreeDTO + node: NodeDTO + + parent: Optional[ParentDTO] = None + + time: TimeDTO + status: StatusDTO + + data: Optional[Data] = None + metrics: Optional[Metrics] = None + meta: Optional[Metadata] = None + tags: Optional[Tags] = None + refs: Optional[Refs] = None + + links: Optional[List[LinkDTO]] = None + + otel: Optional[OTelExtraDTO] = None + + nodes: Optional[Dict[str, Union["SpanDTO", List["SpanDTO"]]]] = None + + +class OTelSpanDTO(BaseModel): + context: OTelContextDTO + + name: str + kind: OTelSpanKind = OTelSpanKind.SPAN_KIND_UNSPECIFIED + + start_time: datetime + end_time: datetime + + status_code: OTelStatusCode = OTelStatusCode.STATUS_CODE_UNSET + status_message: Optional[str] = None + + attributes: Optional[Attributes] = None + events: Optional[List[OTelEventDTO]] = None + + parent: Optional[OTelContextDTO] = None + links: Optional[List[OTelLinkDTO]] = None + + +### --------------------------- ### +### services.observability.dtos ### +################################### + + +#################################### +### services.observability.utils ### +### ---------------------------- ### + +from typing import List, Dict, OrderedDict + + +def parse_span_dtos_to_span_idx( + span_dtos: List[SpanDTO], +) -> Dict[str, SpanDTO]: + span_idx = {span_dto.node.id: span_dto for span_dto in span_dtos} + + return span_idx + + +def parse_span_idx_to_span_id_tree( + span_idx: Dict[str, SpanDTO], +) -> OrderedDict: + span_id_tree = OrderedDict() + index = {} + + def push(span_dto: SpanDTO) -> None: + if span_dto.parent is None: + span_id_tree[span_dto.node.id] = OrderedDict() + index[span_dto.node.id] = span_id_tree[span_dto.node.id] + elif span_dto.parent.id in index: + index[span_dto.parent.id][span_dto.node.id] = OrderedDict() + index[span_dto.node.id] = index[span_dto.parent.id][span_dto.node.id] + + for span_dto in sorted(span_idx.values(), key=lambda span_dto: 
span_dto.time.start): + push(span_dto) + + return span_id_tree + + +def cumulate_costs( + spans_id_tree: OrderedDict, + spans_idx: Dict[str, SpanDTO], +) -> None: + def _get_unit(span: SpanDTO): + if span.metrics is not None: + return span.metrics.get("unit.costs.total", 0.0) + + return 0.0 + + def _get_acc(span: SpanDTO): + if span.metrics is not None: + return span.metrics.get("acc.costs.total", 0.0) + + return 0.0 + + def _acc(a: float, b: float): + return a + b + + def _set(span: SpanDTO, cost: float): + if span.metrics is None: + span.metrics = {} + + if cost != 0.0: + span.metrics["acc.costs.total"] = cost + + _cumulate_tree_dfs(spans_id_tree, spans_idx, _get_unit, _get_acc, _acc, _set) + + +def cumulate_tokens( + spans_id_tree: OrderedDict, + spans_idx: Dict[str, dict], +) -> None: + def _get_unit(span: SpanDTO): + _tokens = { + "prompt": 0.0, + "completion": 0.0, + "total": 0.0, + } + + if span.metrics is not None: + return { + "prompt": span.metrics.get("unit.tokens.prompt", 0.0), + "completion": span.metrics.get("unit.tokens.completion", 0.0), + "total": span.metrics.get("unit.tokens.total", 0.0), + } + + return _tokens + + def _get_acc(span: SpanDTO): + _tokens = { + "prompt": 0.0, + "completion": 0.0, + "total": 0.0, + } + + if span.metrics is not None: + return { + "prompt": span.metrics.get("acc.tokens.prompt", 0.0), + "completion": span.metrics.get("acc.tokens.completion", 0.0), + "total": span.metrics.get("acc.tokens.total", 0.0), + } + + return _tokens + + def _acc(a: dict, b: dict): + return { + "prompt": a.get("prompt", 0.0) + b.get("prompt", 0.0), + "completion": a.get("completion", 0.0) + b.get("completion", 0.0), + "total": a.get("total", 0.0) + b.get("total", 0.0), + } + + def _set(span: SpanDTO, tokens: dict): + if span.metrics is None: + span.metrics = {} + + if tokens.get("prompt", 0.0) != 0.0: + span.metrics["acc.tokens.prompt"] = tokens.get("prompt", 0.0) + if tokens.get("completion", 0.0) != 0.0: + span.metrics["acc.tokens.completion"] 
= ( + tokens.get("completion", 0.0) + if tokens.get("completion", 0.0) != 0.0 + else None + ) + if tokens.get("total", 0.0) != 0.0: + span.metrics["acc.tokens.total"] = ( + tokens.get("total", 0.0) if tokens.get("total", 0.0) != 0.0 else None + ) + + _cumulate_tree_dfs(spans_id_tree, spans_idx, _get_unit, _get_acc, _acc, _set) + + +def _cumulate_tree_dfs( + spans_id_tree: OrderedDict, + spans_idx: Dict[str, SpanDTO], + get_unit_metric, + get_acc_metric, + accumulate_metric, + set_metric, +): + for span_id, children_spans_id_tree in spans_id_tree.items(): + children_spans_id_tree: OrderedDict + + cumulated_metric = get_unit_metric(spans_idx[span_id]) + + _cumulate_tree_dfs( + children_spans_id_tree, + spans_idx, + get_unit_metric, + get_acc_metric, + accumulate_metric, + set_metric, + ) + + for child_span_id in children_spans_id_tree.keys(): + marginal_metric = get_acc_metric(spans_idx[child_span_id]) + cumulated_metric = accumulate_metric(cumulated_metric, marginal_metric) + + set_metric(spans_idx[span_id], cumulated_metric) + + +def connect_children( + spans_id_tree: OrderedDict, + spans_idx: Dict[str, dict], +) -> None: + _connect_tree_dfs(spans_id_tree, spans_idx) + + +def _connect_tree_dfs( + spans_id_tree: OrderedDict, + spans_idx: Dict[str, SpanDTO], +): + for span_id, children_spans_id_tree in spans_id_tree.items(): + children_spans_id_tree: OrderedDict + + parent_span = spans_idx[span_id] + + parent_span.nodes = dict() + + _connect_tree_dfs(children_spans_id_tree, spans_idx) + + for child_span_id in children_spans_id_tree.keys(): + child_span_name = spans_idx[child_span_id].node.name + if child_span_name not in parent_span.nodes: + parent_span.nodes[child_span_name] = spans_idx[child_span_id] + else: + if not isinstance(parent_span.nodes[child_span_name], list): + parent_span.nodes[child_span_name] = [ + parent_span.nodes[child_span_name] + ] + + parent_span.nodes[child_span_name].append(spans_idx[child_span_id]) + + if len(parent_span.nodes) == 0: + 
parent_span.nodes = None + + +### ---------------------------- ### +### services.observability.utils ### +#################################### + + +######################################################## +### apis.fastapi.observability.opentelemetry.semconv ### +### ------------------------------------------------ ### + +from json import loads + +VERSION = "0.4.1" + +V_0_4_1_ATTRIBUTES_EXACT = [ + # OPENLLMETRY + ("gen_ai.system", "ag.meta.system"), + ("gen_ai.request.base_url", "ag.meta.request.base_url"), + ("gen_ai.request.endpoint", "ag.meta.request.endpoint"), + ("gen_ai.request.headers", "ag.meta.request.headers"), + ("gen_ai.request.type", "ag.type.node"), + ("gen_ai.request.streaming", "ag.meta.request.streaming"), + ("gen_ai.request.model", "ag.meta.request.model"), + ("gen_ai.request.max_tokens", "ag.meta.request.max_tokens"), + ("gen_ai.request.temperature", "ag.meta.request.temperature"), + ("gen_ai.request.top_p", "ag.meta.request.top_p"), + ("gen_ai.response.model", "ag.meta.response.model"), + ("gen_ai.usage.prompt_tokens", "ag.metrics.unit.tokens.prompt"), + ("gen_ai.usage.completion_tokens", "ag.metrics.unit.tokens.completion"), + ("gen_ai.usage.total_tokens", "ag.metrics.unit.tokens.total"), + ("llm.headers", "ag.meta.request.headers"), + ("llm.request.type", "ag.type.node"), + ("llm.top_k", "ag.meta.request.top_k"), + ("llm.is_streaming", "ag.meta.request.streaming"), + ("llm.usage.total_tokens", "ag.metrics.unit.tokens.total"), + ("gen_ai.openai.api_base", "ag.meta.request.base_url"), + ("db.system", "ag.meta.system"), + ("db.vector.query.top_k", "ag.meta.request.top_k"), + ("pinecone.query.top_k", "ag.meta.request.top_k"), + ("traceloop.span.kind", "ag.type.node"), + ("traceloop.entity.name", "ag.node.name"), + # OPENINFERENCE + ("output.value", "ag.data.outputs"), + ("input.value", "ag.data.inputs"), + ("embedding.model_name", "ag.meta.request.model"), + ("llm.invocation_parameters", "ag.meta.request"), + ("llm.model_name", 
"ag.meta.request.model"), + ("llm.provider", "ag.meta.provider"), + ("llm.system", "ag.meta.system"), +] +V_0_4_1_ATTRIBUTES_PREFIX = [ + # OPENLLMETRY + ("gen_ai.prompt", "ag.data.inputs.prompt"), + ("gen_ai.completion", "ag.data.outputs.completion"), + ("llm.request.functions", "ag.data.inputs.functions"), + ("llm.request.tools", "ag.data.inputs.tools"), + # OPENINFERENCE + ("llm.token_count", "ag.metrics.unit.tokens"), + ("llm.input_messages", "ag.data.inputs.prompt"), + ("llm.output_messages", "ag.data.outputs.completion"), +] + +V_0_4_1_ATTRIBUTES_DYNAMIC = [ + # OPENLLMETRY + ("traceloop.entity.input", lambda x: ("ag.data.inputs", loads(x).get("inputs"))), + ("traceloop.entity.output", lambda x: ("ag.data.outputs", loads(x).get("outputs"))), +] + + +V_0_4_1_MAPS = { + "attributes": { + "exact": { + "from": {otel: agenta for otel, agenta in V_0_4_1_ATTRIBUTES_EXACT[::-1]}, + "to": {agenta: otel for otel, agenta in V_0_4_1_ATTRIBUTES_EXACT[::-1]}, + }, + "prefix": { + "from": {otel: agenta for otel, agenta in V_0_4_1_ATTRIBUTES_PREFIX[::-1]}, + "to": {agenta: otel for otel, agenta in V_0_4_1_ATTRIBUTES_PREFIX[::-1]}, + }, + "dynamic": { + "from": {otel: agenta for otel, agenta in V_0_4_1_ATTRIBUTES_DYNAMIC[::-1]} + }, + }, +} +V_0_4_1_KEYS = { + "attributes": { + "exact": { + "from": list(V_0_4_1_MAPS["attributes"]["exact"]["from"].keys()), + "to": list(V_0_4_1_MAPS["attributes"]["exact"]["to"].keys()), + }, + "prefix": { + "from": list(V_0_4_1_MAPS["attributes"]["prefix"]["from"].keys()), + "to": list(V_0_4_1_MAPS["attributes"]["prefix"]["to"].keys()), + }, + "dynamic": { + "from": list(V_0_4_1_MAPS["attributes"]["dynamic"]["from"].keys()), + }, + }, +} + + +MAPS = { + "0.4.1": V_0_4_1_MAPS, # LATEST +} +KEYS = { + "0.4.1": V_0_4_1_KEYS, # LATEST +} + +CODEX = {"maps": MAPS[VERSION], "keys": KEYS[VERSION]} + + +### ------------------------------------------------ ### +### apis.fastapi.observability.opentelemetry.semconv ### 
+######################################################## + + +######################################## +### apis.fastapi.observability.utils ### +### -------------------------------- ### + +from typing import Optional, Union, Tuple, Any, List, Dict +from uuid import UUID +from collections import OrderedDict +from json import loads, JSONDecodeError, dumps +from copy import copy + + +def _unmarshal_attributes( + marshalled: Dict[str, Any], +) -> Dict[str, Any]: + """ + Unmarshals a dictionary of marshalled attributes into a nested dictionary + + Example: + marshalled = { + "ag.type": "tree", + "ag.node.name": "root", + "ag.node.children.0.name": "child1", + "ag.node.children.1.name": "child2" + } + unmarshalled = { + "ag": { + "type": "tree", + "node": { + "name": "root", + "children": [ + { + "name": "child1", + }, + { + "name": "child2", + } + ] + } + } + } + """ + unmarshalled = {} + + for key, value in marshalled.items(): + keys = key.split(".") + + level = unmarshalled + + for i, part in enumerate(keys[:-1]): + if part.isdigit(): + part = int(part) + + if not isinstance(level, list): + level = [] + + while len(level) <= part: + level.append({}) + + level = level[part] + + else: + if part not in level: + level[part] = {} if not keys[i + 1].isdigit() else [] + + level = level[part] + + last_key = keys[-1] + + if last_key.isdigit(): + last_key = int(last_key) + + if not isinstance(level, list): + level = [] + + while len(level) <= last_key: + level.append(None) + + level[last_key] = value + + else: + level[last_key] = value + + return unmarshalled + + +def _encode_key( + namespace, + key: str, +) -> str: + return f"ag.{namespace}.{key}" + + +def _decode_key( + namespace, + key: str, +) -> str: + return key.replace(f"ag.{namespace}.", "") + + +def _decode_value( + value: Any, +) -> Any: + if isinstance(value, (int, float, bool, bytes)): + return value + + if isinstance(value, str): + if value == "@ag.type=none:": + return None + + if 
value.startswith("@ag.type=json:"): + encoded = value[len("@ag.type=json:") :] + value = loads(encoded) + return value + + return value + + return value + + +def _get_attributes( + attributes: Attributes, + namespace: str, +): + return { + _decode_key(namespace, key): _decode_value(value) + for key, value in attributes.items() + if key != _decode_key(namespace, key) + } + + +def _parse_from_types( + otel_span_dto: OTelSpanDTO, +) -> dict: + types = _get_attributes(otel_span_dto.attributes, "type") + + if types.get("tree"): + del otel_span_dto.attributes[_encode_key("type", "tree")] + + if types.get("node"): + del otel_span_dto.attributes[_encode_key("type", "node")] + + return types + + +def _parse_from_semconv( + attributes: Attributes, +) -> None: + _attributes = copy(attributes) + + for old_key, value in _attributes.items(): + if old_key in CODEX["keys"]["attributes"]["exact"]["from"]: + new_key = CODEX["maps"]["attributes"]["exact"]["from"][old_key] + + attributes[new_key] = value + + del attributes[old_key] + + else: + for prefix_key in CODEX["keys"]["attributes"]["prefix"]["from"]: + if old_key.startswith(prefix_key): + prefix = CODEX["maps"]["attributes"]["prefix"]["from"][prefix_key] + + new_key = old_key.replace(prefix_key, prefix) + + attributes[new_key] = value + + del attributes[old_key] + + for dynamic_key in CODEX["keys"]["attributes"]["dynamic"]["from"]: + if old_key == dynamic_key: + try: + new_key, new_value = CODEX["maps"]["attributes"]["dynamic"][ + "from" + ][dynamic_key](value) + + attributes[new_key] = new_value + + except: # pylint: disable=bare-except + pass + + +def _parse_from_links( + otel_span_dto: OTelSpanDTO, +) -> dict: + # TESTING + otel_span_dto.links = [ + OTelLinkDTO( + context=otel_span_dto.context, + attributes={"ag.type.link": "testcase"}, + ) + ] + # ------- + + # LINKS + links = None + otel_links = None + + if otel_span_dto.links: + links = list() + otel_links = list() + + for link in otel_span_dto.links: + _links = 
_get_attributes(link.attributes, "type") + + if _links: + link_type = _links.get("link") + link_tree_id = str(UUID(link.context.trace_id[2:])) + link_node_id = str( + UUID(link.context.trace_id[2 + 16 :] + link.context.span_id[2:]) + ) + + links.append( + LinkDTO( + type=link_type, + tree_id=link_tree_id, + id=link_node_id, + ) + ) + else: + otel_links.append(link) + + links = links if links else None + otel_links = otel_links if otel_links else None + + otel_span_dto.links = otel_links + + return links + + +def _parse_from_attributes( + otel_span_dto: OTelSpanDTO, +) -> Tuple[dict, dict, dict, dict, dict]: + # DATA + _data = _get_attributes(otel_span_dto.attributes, "data") + + for key in _data.keys(): + del otel_span_dto.attributes[_encode_key("data", key)] + + # _data = _unmarshal_attributes(_data) + _data = _data if _data else None + + # METRICS + _metrics = _get_attributes(otel_span_dto.attributes, "metrics") + + for key in _metrics.keys(): + del otel_span_dto.attributes[_encode_key("metrics", key)] + + # _metrics = _unmarshal_attributes(_metrics) + _metrics = _metrics if _metrics else None + + # META + _meta = _get_attributes(otel_span_dto.attributes, "meta") + + for key in _meta.keys(): + del otel_span_dto.attributes[_encode_key("meta", key)] + + # _meta = _unmarshal_attributes(_meta) + _meta = _meta if _meta else None + + # TAGS + _tags = _get_attributes(otel_span_dto.attributes, "tags") + + for key in _tags.keys(): + del otel_span_dto.attributes[_encode_key("tags", key)] + + _tags = _tags if _tags else None + + # REFS + _refs = _get_attributes(otel_span_dto.attributes, "refs") + + for key in _refs.keys(): + del otel_span_dto.attributes[_encode_key("refs", key)] + + _refs = _refs if _refs else None + + if len(otel_span_dto.attributes.keys()) < 1: + otel_span_dto.attributes = None + + return _data, _metrics, _meta, _tags, _refs + + +def parse_from_otel_span_dto( + otel_span_dto: OTelSpanDTO, +) -> SpanDTO: + lifecyle = LifecycleDTO( + 
created_at=datetime.now(), + ) + + _parse_from_semconv(otel_span_dto.attributes) + + types = _parse_from_types(otel_span_dto) + + tree_id = UUID(otel_span_dto.context.trace_id[2:]) + + tree_type: str = types.get("tree") + + tree = TreeDTO( + id=tree_id, + type=tree_type.lower() if tree_type else None, + ) + + node_id = UUID(tree_id.hex[16:] + otel_span_dto.context.span_id[2:]) + + node_type = NodeType.TASK + try: + node_type = NodeType(types.get("node", "").lower()) + except: # pylint: disable=bare-except + pass + + node = NodeDTO( + id=node_id, + type=node_type, + name=otel_span_dto.name, + ) + + parent = ( + ParentDTO( + id=( + UUID( + otel_span_dto.parent.trace_id[2 + 16 :] + + otel_span_dto.parent.span_id[2:] + ) + ) + ) + if otel_span_dto.parent + else None + ) + + time = TimeDTO( + start=otel_span_dto.start_time, + end=otel_span_dto.end_time, + ) + + status = StatusDTO( + code=otel_span_dto.status_code.value.replace("STATUS_CODE_", ""), + message=otel_span_dto.status_message, + ) + + links = _parse_from_links(otel_span_dto) + + data, metrics, meta, tags, refs = _parse_from_attributes(otel_span_dto) + + duration = (otel_span_dto.end_time - otel_span_dto.start_time).total_seconds() + + if metrics is None: + metrics = dict() + + metrics["acc.duration.total"] = round(duration * 1_000, 3) # milliseconds + + root_id = str(tree_id) + if refs is not None: + root_id = refs.get("scenario.id", root_id) + + root = RootDTO(id=UUID(root_id)) + + otel = OTelExtraDTO( + kind=otel_span_dto.kind.value, + attributes=otel_span_dto.attributes, + events=otel_span_dto.events, + links=otel_span_dto.links, + ) + + span_dto = SpanDTO( + lifecycle=lifecyle, + root=root, + tree=tree, + node=node, + parent=parent, + time=time, + status=status, + data=data, + metrics=metrics, + meta=meta, + tags=tags, + refs=refs, + links=links, + otel=otel, + ) + + return span_dto + + +def parse_to_agenta_span_dto( + span_dto: SpanDTO, +) -> SpanDTO: + # DATA + if span_dto.data: + span_dto.data = 
_unmarshal_attributes(span_dto.data) + + if "outputs" in span_dto.data: + if "__default__" in span_dto.data["outputs"]: + span_dto.data["outputs"] = span_dto.data["outputs"]["__default__"] + + # METRICS + if span_dto.metrics: + span_dto.metrics = _unmarshal_attributes(span_dto.metrics) + + # META + if span_dto.meta: + span_dto.meta = _unmarshal_attributes(span_dto.meta) + + # TAGS + if span_dto.tags: + span_dto.tags = _unmarshal_attributes(span_dto.tags) + + # REFS + if span_dto.refs: + span_dto.refs = _unmarshal_attributes(span_dto.refs) + + for link in span_dto.links: + link.tree_id = None + + if span_dto.nodes: + for v in span_dto.nodes.values(): + if isinstance(v, list): + for n in v: + parse_to_agenta_span_dto(n) + else: + parse_to_agenta_span_dto(v) + + # MASK LINKS FOR NOW + span_dto.links = None + # ------------------ + + # MASK LIFECYCLE FOR NOW + # span_dto.lifecycle = None + if span_dto.lifecycle: + span_dto.lifecycle.updated_at = None + span_dto.lifecycle.updated_by_id = None + # ---------------------- + + return span_dto + + +### -------------------------------- ### +### apis.fastapi.observability.utils ### +######################################## + + +from litellm import cost_calculator +from opentelemetry.sdk.trace import ReadableSpan + +from agenta.sdk.types import AgentaNodeDto, AgentaNodesResponse + + +def parse_inline_trace( + spans: Dict[str, ReadableSpan], +): + otel_span_dtos = _parse_readable_spans(spans) + + ############################################################ + ### apis.fastapi.observability.api.otlp_collect_traces() ### + ### ---------------------------------------------------- ### + span_dtos = [ + parse_from_otel_span_dto(otel_span_dto) for otel_span_dto in otel_span_dtos + ] + ### ---------------------------------------------------- ### + ### apis.fastapi.observability.api.otlp_collect_traces() ### + ############################################################ + + ##################################################### + ### 
services.observability.service.ingest/query() ### + ### --------------------------------------------- ### + span_idx = parse_span_dtos_to_span_idx(span_dtos) + span_id_tree = parse_span_idx_to_span_id_tree(span_idx) + ### --------------------------------------------- ### + ### services.observability.service.ingest/query() ### + ##################################################### + + ############################################### + ### services.observability.service.ingest() ### + ### --------------------------------------- ### + calculate_costs(span_idx) + cumulate_costs(span_id_tree, span_idx) + cumulate_tokens(span_id_tree, span_idx) + ### --------------------------------------- ### + ### services.observability.service.ingest() ### + ############################################### + + ############################################## + ### services.observability.service.query() ### + ### -------------------------------------- ### + connect_children(span_id_tree, span_idx) + root_span_dtos = [span_idx[span_id] for span_id in span_id_tree.keys()] + agenta_span_dtos = [ + parse_to_agenta_span_dto(span_dto) for span_dto in root_span_dtos + ] + ### -------------------------------------- ### + ### services.observability.service.query() ### + ############################################## + + spans = [ + loads( + span_dto.model_dump_json( + exclude_none=True, + exclude_defaults=True, + ) + ) + for span_dto in agenta_span_dtos + ] + inline_trace = AgentaNodesResponse( + version="1.0.0", + nodes=[AgentaNodeDto(**span) for span in spans], + ).model_dump(exclude_none=True, exclude_unset=True) + return inline_trace + + +def _parse_readable_spans( + spans: List[ReadableSpan], +) -> List[OTelSpanDTO]: + otel_span_dtos = list() + + for span in spans: + otel_span_dto = OTelSpanDTO( + context=OTelContextDTO( + trace_id=_int_to_hex(span.get_span_context().trace_id, 128), + span_id=_int_to_hex(span.get_span_context().span_id, 64), + ), + name=span.name, + kind=OTelSpanKind( + 
"SPAN_KIND_" + + (span.kind if isinstance(span.kind, str) else span.kind.name) + ), + start_time=_timestamp_ns_to_datetime(span.start_time), + end_time=_timestamp_ns_to_datetime(span.end_time), + status_code=OTelStatusCode("STATUS_CODE_" + span.status.status_code.name), + status_message=span.status.description, + attributes=span.attributes, + events=[ + OTelEventDTO( + name=event.name, + timestamp=_timestamp_ns_to_datetime(event.timestamp), + attributes=event.attributes, + ) + for event in span.events + ], + parent=( + OTelContextDTO( + trace_id=_int_to_hex(span.parent.trace_id, 128), + span_id=_int_to_hex(span.parent.span_id, 64), + ) + if span.parent + else None + ), + links=[ + OTelLinkDTO( + context=OTelContextDTO( + trace_id=_int_to_hex(link.context.trace_id, 128), + span_id=_int_to_hex(link.context.span_id, 64), + ), + attributes=link.attributes, + ) + for link in span.links + ], + ) + + otel_span_dtos.append(otel_span_dto) + + return otel_span_dtos + + +def _int_to_hex(integer, bits): + _hex = hex(integer)[2:] + + _hex = _hex.zfill(bits // 4) + + _hex = "0x" + _hex + + return _hex + + +def _timestamp_ns_to_datetime(timestamp_ns): + _datetime = datetime.fromtimestamp( + timestamp_ns / 1_000_000_000, + ).isoformat( + timespec="microseconds", + ) + + return _datetime + + +class LlmTokens(BaseModel): + prompt_tokens: Optional[int] = 0 + completion_tokens: Optional[int] = 0 + total_tokens: Optional[int] = 0 + + +TYPES_WITH_COSTS = [ + "embedding", + "query", + "completion", + "chat", + "rerank", +] + + +def calculate_costs(span_idx: Dict[str, SpanDTO]): + for span in span_idx.values(): + if ( + span.node.type + and span.node.type.name.lower() in TYPES_WITH_COSTS + and span.meta + and span.metrics + ): + model = span.meta.get("response.model") + prompt_tokens = span.metrics.get("unit.tokens.prompt", 0.0) + completion_tokens = span.metrics.get("unit.tokens.completion", 0.0) + + try: + costs = cost_calculator.cost_per_token( + model=model, + 
prompt_tokens=prompt_tokens, + completion_tokens=completion_tokens, + ) + + if not costs: + continue + + prompt_cost, completion_cost = costs + total_cost = prompt_cost + completion_cost + + span.metrics["unit.costs.prompt"] = prompt_cost + span.metrics["unit.costs.completion"] = completion_cost + span.metrics["unit.costs.total"] = total_cost + + except: # pylint: disable=bare-except + pass diff --git a/services/completion-stateless-sdk/agenta/sdk/tracing/processors.py b/services/completion-stateless-sdk/agenta/sdk/tracing/processors.py new file mode 100644 index 0000000000..2c612220cc --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/tracing/processors.py @@ -0,0 +1,121 @@ +from typing import Optional, Dict, List + +from opentelemetry.baggage import get_all as get_baggage +from opentelemetry.context import Context +from opentelemetry.sdk.trace import Span +from opentelemetry.sdk.trace.export import ( + SpanExporter, + ReadableSpan, + BatchSpanProcessor, + _DEFAULT_MAX_QUEUE_SIZE, + _DEFAULT_MAX_EXPORT_BATCH_SIZE, +) + +from agenta.sdk.utils.logging import log +from agenta.sdk.tracing.conventions import Reference + + +class TraceProcessor(BatchSpanProcessor): + def __init__( + self, + span_exporter: SpanExporter, + references: Dict[str, str] = None, + max_queue_size: int = None, + schedule_delay_millis: float = None, + max_export_batch_size: int = None, + export_timeout_millis: float = None, + ): + super().__init__( + span_exporter, + _DEFAULT_MAX_QUEUE_SIZE, + 12 * 60 * 60 * 1000, # 12 hours + _DEFAULT_MAX_EXPORT_BATCH_SIZE, + 500, # < 1 second (0.5 seconds) + ) + + self._registry = dict() + self._exporter = span_exporter + self.references = references or dict() + self.spans: Dict[int, List[ReadableSpan]] = dict() + + def on_start( + self, + span: Span, + parent_context: Optional[Context] = None, + ) -> None: + baggage = get_baggage(parent_context) + + for key in self.references.keys(): + span.set_attribute(f"ag.refs.{key}", self.references[key]) + 
+ for key in baggage.keys(): + if key.startswith("ag.refs."): + _key = key.replace("ag.refs.", "") + if _key in [_.value for _ in Reference.__members__.values()]: + span.set_attribute(key, baggage[key]) + + if span.context.trace_id not in self._registry: + self._registry[span.context.trace_id] = dict() + + self._registry[span.context.trace_id][span.context.span_id] = True + + def on_end( + self, + span: ReadableSpan, + ): + if self.done: + return + + if span.context.trace_id not in self.spans: + self.spans[span.context.trace_id] = list() + + self.spans[span.context.trace_id].append(span) + + del self._registry[span.context.trace_id][span.context.span_id] + + if len(self._registry[span.context.trace_id]) == 0: + self.export(span.context.trace_id) + + def export( + self, + trace_id: int, + ): + spans = self.spans[trace_id] + + for span in spans: + self.queue.appendleft(span) + + with self.condition: + self.condition.notify() + + del self.spans[trace_id] + + def force_flush( + self, + timeout_millis: int = None, + ) -> bool: + ret = super().force_flush(timeout_millis) + + if not ret: + log.warning("Agenta - Skipping export due to timeout.") + + def is_ready( + self, + trace_id: Optional[int] = None, + ) -> bool: + is_ready = True + + try: + is_ready = self._exporter.is_ready(trace_id) + except: # pylint: disable=bare-except + pass + + return is_ready + + def fetch( + self, + trace_id: Optional[int] = None, + ) -> Dict[str, ReadableSpan]: + trace = self._exporter.fetch(trace_id) # type: ignore + + return trace diff --git a/services/completion-stateless-sdk/agenta/sdk/tracing/spans.py b/services/completion-stateless-sdk/agenta/sdk/tracing/spans.py new file mode 100644 index 0000000000..eaee49b0f8 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/sdk/tracing/spans.py @@ -0,0 +1,136 @@ +from typing import Optional, Union, Any, Dict + +from opentelemetry.trace import SpanContext +from opentelemetry.trace.status import Status, StatusCode +from 
# --- agenta/sdk/tracing/spans.py ---
from typing import Optional, Union, Any, Dict

from opentelemetry.trace import SpanContext
from opentelemetry.trace.status import Status, StatusCode
from opentelemetry.sdk.trace import Span

from agenta.sdk.tracing.attributes import serialize


class CustomSpan(Span):  # INHERITANCE FOR TYPING ONLY
    """Typing-compatible wrapper around an SDK span.

    Delegates every call to the wrapped span; the attribute/event/link/exception
    methods additionally run their attributes through `serialize` so that
    namespacing and flattening are applied consistently.
    """

    def __init__(
        self,
        span: Span,
    ) -> None:
        super().__init__(  # INHERITANCE FOR TYPING ONLY
            name=span.name,
            context=span.context,
            parent=span.parent,
            sampler=span._sampler,
            trace_config=span._trace_config,
            resource=span.resource,
            attributes=span.attributes,
            events=span.events,
            links=span.links,
            kind=span.kind,
            span_processor=span._span_processor,
            instrumentation_info=span.instrumentation_info,
            record_exception=span._record_exception,
            set_status_on_exception=span._set_status_on_exception,
            limits=span._limits,
            instrumentation_scope=span.instrumentation_scope,
        )

        self._span = span

    ## --- PLAIN PROXIES --- ##

    def get_span_context(self):
        return self._span.get_span_context()

    def is_recording(self) -> bool:
        return self._span.is_recording()

    def update_name(self, name: str) -> None:
        self._span.update_name(name)

    def set_status(
        self,
        status: Union[Status, StatusCode],
        description: Optional[str] = None,
    ) -> None:
        self._span.set_status(status=status, description=description)

    def end(self) -> None:
        self._span.end()

    ## --- PROXIES WITH ATTRIBUTE SERIALIZATION --- ##

    def set_attributes(
        self,
        attributes: Dict[str, Any],
        namespace: Optional[str] = None,
        max_depth: Optional[int] = None,
    ) -> None:
        self._span.set_attributes(
            attributes=serialize(
                namespace=namespace,
                attributes=attributes,
                max_depth=max_depth,
            )
        )

    def set_attribute(
        self,
        key: str,
        value: Any,
        namespace: Optional[str] = None,
    ) -> None:
        # Single-attribute convenience wrapper over set_attributes().
        self.set_attributes(attributes={key: value}, namespace=namespace)

    def add_event(
        self,
        name: str,
        attributes: Optional[Dict[str, Any]] = None,
        timestamp: Optional[int] = None,
        namespace: Optional[str] = None,
    ) -> None:
        self._span.add_event(
            name=name,
            attributes=serialize(namespace=namespace, attributes=attributes),
            timestamp=timestamp,
        )

    def add_link(
        self,
        context: SpanContext,
        attributes: Optional[Dict[str, Any]] = None,
        namespace: Optional[str] = None,
    ) -> None:
        self._span.add_link(
            context=context,
            attributes=serialize(namespace=namespace, attributes=attributes),
        )

    def record_exception(
        self,
        exception: BaseException,
        attributes: Optional[Dict[str, Any]] = None,
        timestamp: Optional[int] = None,
        escaped: bool = False,
        namespace: Optional[str] = None,
    ) -> None:
        self._span.record_exception(
            exception=exception,
            attributes=serialize(namespace=namespace, attributes=attributes),
            timestamp=timestamp,
            escaped=escaped,
        )


# --- agenta/sdk/tracing/tracing.py ---
from typing import Optional, Any, Dict, Callable
from enum import Enum

from httpx import get as check  # kept for the (disabled) connectivity probe below

from opentelemetry.trace import (
    get_current_span,
    set_tracer_provider,
    get_tracer_provider,
    Status,
    StatusCode,
)
from opentelemetry.sdk.trace import Span, Tracer, TracerProvider
from opentelemetry.sdk.resources import Resource

from agenta.sdk.utils.singleton import Singleton
from agenta.sdk.utils.exceptions import suppress
from agenta.sdk.utils.logging import log
from agenta.sdk.tracing.processors import TraceProcessor
from agenta.sdk.tracing.exporters import InlineExporter, OTLPExporter
from agenta.sdk.tracing.spans import CustomSpan
from agenta.sdk.tracing.inline import parse_inline_trace
from agenta.sdk.tracing.conventions import Reference, is_valid_attribute_key


class Tracing(metaclass=Singleton):
    """Process-wide tracing facade: owns the tracer provider, an inline
    (in-memory) processor for fetching traces back, and an OTLP processor
    for remote export."""

    VERSION = "0.1.0"

    # Re-exported for caller convenience (ag.tracing.Status / .StatusCode).
    Status = Status
    StatusCode = StatusCode

    def __init__(
        self,
        url: str,
        redact: Optional[Callable[..., Any]] = None,
        redact_on_error: Optional[bool] = True,
    ) -> None:
        # OTLP endpoint and headers
        self.otlp_url = url
        self.headers: Dict[str, str] = {}
        # References attached to every span (service/app ids)
        self.references: Dict[str, str] = {}
        # Credentials keyed by trace id
        self.credentials: Dict[int, str] = {}

        # Lazily built in configure()
        self.tracer_provider: Optional[TracerProvider] = None
        self.inline: Optional[TraceProcessor] = None
        self.tracer: Optional[Tracer] = None
        # Registry backing the inline exporter
        self.inline_spans: Dict[str, Any] = {}

        # Redaction hooks
        self.redact = redact
        self.redact_on_error = redact_on_error

    # PUBLIC

    def configure(
        self,
        api_key: Optional[str] = None,
        service_id: Optional[str] = None,
        # DEPRECATING
        app_id: Optional[str] = None,
    ):
        """Build the provider, inline processor, and (best-effort) OTLP processor."""
        if api_key:
            self.headers["Authorization"] = f"ApiKey {api_key}"
        if service_id:
            self.references["service.id"] = service_id
        if app_id:
            self.references["application.id"] = app_id

        self.tracer_provider = TracerProvider(
            resource=Resource(attributes={"service.name": "agenta-sdk"})
        )

        # Inline processor: keeps spans in memory so they can be fetched back.
        self.inline = TraceProcessor(
            InlineExporter(registry=self.inline_spans),
            references=self.references,
        )
        self.tracer_provider.add_span_processor(self.inline)

        # OTLP processor: best effort; never let export setup break user code.
        try:
            log.info(
                "Agenta - OLTP URL: %s",
                self.otlp_url,
            )
            # Connectivity probe intentionally disabled:
            # check(self.otlp_url, headers=self.headers, timeout=1)

            _otlp = TraceProcessor(
                OTLPExporter(
                    endpoint=self.otlp_url,
                    headers=self.headers,
                    credentials=self.credentials,
                ),
                references=self.references,
            )

            self.tracer_provider.add_span_processor(_otlp)
        except:  # pylint: disable=bare-except
            log.warning("Agenta - OLTP unreachable, skipping exports.")

        # Register globally so instrumentation libraries pick it up.
        set_tracer_provider(self.tracer_provider)
        self.tracer: Tracer = self.tracer_provider.get_tracer("agenta.tracer")

    def get_current_span(self):
        """Return the active span, wrapped in CustomSpan while it is recording."""
        _span = None

        with suppress():
            _span = get_current_span()

            if _span.is_recording():
                return CustomSpan(_span)

        return _span

    def store_internals(
        self,
        attributes: Dict[str, Any],
        span: Optional[Span] = None,
    ):
        """Attach `attributes` under data.internals on the given/current span."""
        with suppress():
            if span is None:
                span = self.get_current_span()

            span.set_attributes(
                attributes={"internals": attributes},
                namespace="data",
            )

    def store_refs(
        self,
        refs: Dict[str, str],
        span: Optional[Span] = None,
    ):
        """Attach known reference keys to the span and remember them for
        every span created afterwards."""
        with suppress():
            if span is None:
                span = self.get_current_span()

            # Iterate __members__ (not the enum itself) to include aliases.
            valid = [member.value for member in Reference.__members__.values()]

            for key in refs.keys():
                if key in valid:
                    # ADD REFERENCE TO THIS SPAN
                    span.set_attribute(
                        key.value if isinstance(key, Enum) else key,
                        refs[key],
                        namespace="refs",
                    )

                    # AND TO ALL SPANS CREATED AFTER THIS ONE
                    self.references[key] = refs[key]
                    # TODO: THIS SHOULD BE REPLACED BY A TRACE CONTEXT !!!

    def store_meta(
        self,
        meta: Dict[str, Any],
        span: Optional[Span] = None,
    ):
        """Attach valid keys of `meta` under the meta namespace."""
        with suppress():
            if span is None:
                span = self.get_current_span()

            for key, value in meta.items():
                if is_valid_attribute_key(key):
                    span.set_attribute(key, value, namespace="meta")

    def store_metrics(
        self,
        metrics: Dict[str, Any],
        span: Optional[Span] = None,
    ):
        """Attach valid keys of `metrics` under the metrics namespace."""
        with suppress():
            if span is None:
                span = self.get_current_span()

            for key, value in metrics.items():
                if is_valid_attribute_key(key):
                    span.set_attribute(key, value, namespace="metrics")

    def is_inline_trace_ready(
        self,
        trace_id: Optional[int] = None,
    ) -> bool:
        """True once the inline processor has the full trace (or no id given)."""
        is_ready = True

        with suppress():
            if trace_id is not None:
                is_ready = self.inline.is_ready(trace_id)

        return is_ready

    def get_inline_trace(
        self,
        trace_id: Optional[int] = None,
    ) -> Dict[str, Any]:
        """Fetch and parse the inline trace; {} when unavailable."""
        _inline_trace = {}

        with suppress():
            if self.inline.is_ready(trace_id) is True:
                otel_spans = self.inline.fetch(trace_id)

                if otel_spans:
                    _inline_trace = parse_inline_trace(otel_spans)

        return _inline_trace


def get_tracer(
    tracing: Tracing,
) -> Tracer:
    """Return the configured tracer, or a default one before configuration."""
    if tracing is None or tracing.tracer is None or tracing.tracer_provider is None:
        return get_tracer_provider().get_tracer("default.tracer")

    return tracing.tracer
# --- agenta/sdk/types.py ---
import json
from dataclasses import dataclass
from typing import Dict, List, Optional, Any, Union

from pydantic import ConfigDict, BaseModel, HttpUrl

from agenta.client.backend.types.agenta_node_dto import AgentaNodeDto
from agenta.client.backend.types.agenta_nodes_response import AgentaNodesResponse


@dataclass
class MultipleChoice:
    """Declares a multiple-choice parameter: a flat list of options, or a
    mapping of group name to options."""

    choices: Union[List[str], Dict[str, List[str]]]


class InFile:
    """Simple handle for a file passed into an app (name + local path)."""

    def __init__(self, file_name: str, file_path: str):
        self.file_name = file_name
        self.file_path = file_path


class LLMTokenUsage(BaseModel):
    """Token counts reported by an LLM call."""

    completion_tokens: int
    prompt_tokens: int
    total_tokens: int


class BaseResponse(BaseModel):
    """Standard response envelope: payload data plus the trace tree."""

    version: Optional[str] = "3.0"
    data: Optional[Union[str, Dict[str, Any]]] = None
    tree: Optional[AgentaNodesResponse] = None


class DictInput(dict):
    """Dict-valued app input; `data` lists the expected keys."""

    def __new__(cls, default_keys: Optional[List[str]] = None):
        instance = super().__new__(cls, default_keys)
        if default_keys is None:
            default_keys = []
        instance.data = list(default_keys)  # type: ignore
        return instance

    @classmethod
    def __schema_type_properties__(cls) -> dict:
        return {"x-parameter": "dict"}


class TextParam(str):
    """Free-text configuration parameter."""

    @classmethod
    def __schema_type_properties__(cls) -> dict:
        return {"x-parameter": "text", "type": "string"}


class BinaryParam(int):
    """Boolean configuration parameter (stored as int for schema purposes)."""

    def __new__(cls, value: bool = False):
        instance = super().__new__(cls, int(value))
        instance.default = value  # type: ignore
        return instance

    @classmethod
    def __schema_type_properties__(cls) -> dict:
        return {
            "x-parameter": "bool",
            "type": "boolean",
        }


class IntParam(int):
    """Integer configuration parameter with an inclusive [minval, maxval] range."""

    def __new__(cls, default: int = 6, minval: float = 1, maxval: float = 10):
        instance = super().__new__(cls, default)
        # CONSISTENCY FIX: expose `.default` like FloatParam/BinaryParam do.
        instance.default = default  # type: ignore
        instance.minval = minval  # type: ignore
        instance.maxval = maxval  # type: ignore
        return instance

    @classmethod
    def __schema_type_properties__(cls) -> dict:
        return {"x-parameter": "int", "type": "integer"}


class FloatParam(float):
    """Float configuration parameter with an inclusive [minval, maxval] range."""

    def __new__(cls, default: float = 0.5, minval: float = 0.0, maxval: float = 1.0):
        instance = super().__new__(cls, default)
        instance.default = default  # type: ignore
        instance.minval = minval  # type: ignore
        instance.maxval = maxval  # type: ignore
        return instance

    @classmethod
    def __schema_type_properties__(cls) -> dict:
        return {"x-parameter": "float", "type": "number"}


class MultipleChoiceParam(str):
    """Single selection out of a flat list of choices.

    Raises:
        ValueError: if the arguments look swapped (default passed a list),
            or if neither a default value nor choices are provided.
    """

    def __new__(
        cls, default: Optional[str] = None, choices: Optional[List[str]] = None
    ):
        if default is not None and isinstance(default, list):
            raise ValueError(
                "The order of the parameters for MultipleChoiceParam is wrong! It's MultipleChoiceParam(default, choices) and not the opposite"
            )

        # ROBUSTNESS FIX: guard on truthiness of `choices` (was `choices is not
        # None`), so an empty choices list no longer raises IndexError below
        # and instead falls through to the explicit ValueError.
        if not default and choices:
            # if a default value is not provided,
            # set the first value in the choices list
            default = choices[0]

        if default is None and not choices:
            # raise error if no default value or choices is provided
            raise ValueError("You must provide either a default value or choices")

        instance = super().__new__(cls, default)
        instance.choices = choices  # type: ignore
        instance.default = default  # type: ignore
        return instance

    @classmethod
    def __schema_type_properties__(cls) -> dict:
        return {"x-parameter": "choice", "type": "string", "enum": []}


class GroupedMultipleChoiceParam(str):
    """Single selection out of grouped choices ({group: [options]}).

    Raises:
        ValueError: if `default` is given but not found in any group.
    """

    def __new__(
        cls,
        default: Optional[str] = None,
        choices: Optional[Dict[str, List[str]]] = None,
    ):
        if choices is None:
            choices = {}
        if default and not any(
            default in choice_list for choice_list in choices.values()
        ):
            if not choices:
                print(
                    f"Warning: Default value {default} provided but choices are empty."
                )
            else:
                raise ValueError(
                    f"Default value {default} is not in the provided choices"
                )

        if not default:
            # fall back to the first option of the first group, if any
            first_group = next(iter(choices.values()), None)
            if first_group:
                default = first_group[0]

        instance = super().__new__(cls, default)
        instance.choices = choices  # type: ignore
        instance.default = default  # type: ignore
        return instance

    @classmethod
    def __schema_type_properties__(cls) -> dict:
        return {
            "x-parameter": "grouped_choice",
            "type": "string",
        }


class MessagesInput(list):
    """Messages Input for Chat-completion.

    Args:
        messages (List[Dict[str, str]]): The list of messages inputs.
            Each message should be a dictionary with "role" and "content" keys.
    """

    # BUGFIX: the default was a shared mutable list (`messages=[]`); use None
    # and build a fresh list per call instead.
    def __new__(cls, messages: Optional[List[Dict[str, str]]] = None):
        instance = super().__new__(cls)
        instance.default = messages if messages is not None else []  # type: ignore
        return instance

    @classmethod
    def __schema_type_properties__(cls) -> dict:
        return {"x-parameter": "messages", "type": "array"}


class FileInputURL(HttpUrl):
    """URL-valued file input."""

    def __new__(cls, url: str):
        instance = super().__new__(cls, url)
        instance.default = url  # type: ignore
        return instance

    @classmethod
    def __schema_type_properties__(cls) -> dict:
        return {"x-parameter": "file_url", "type": "string"}


class Context(BaseModel):
    """Free-form context bag (extra fields allowed) with JSON round-tripping."""

    model_config = ConfigDict(extra="allow")

    def to_json(self):
        return self.model_dump()

    @classmethod
    def from_json(cls, json_str: str):
        data = json.loads(json_str)
        return cls(**data)


class ReferencesResponse(BaseModel):
    """Identifiers for an app / variant / environment configuration."""

    app_id: Optional[str] = None
    app_slug: Optional[str] = None
    variant_id: Optional[str] = None
    variant_slug: Optional[str] = None
    variant_version: Optional[int] = None
    environment_id: Optional[str] = None
    environment_slug: Optional[str] = None
    environment_version: Optional[int] = None

    def __str__(self):
        return str(self.model_dump(exclude_none=True))


class LifecyclesResponse(ReferencesResponse):
    """References plus commit/deploy audit metadata."""

    committed_at: Optional[str] = None
    committed_by: Optional[str] = None
    committed_by_id: Optional[str] = None
    deployed_at: Optional[str] = None
    deployed_by: Optional[str] = None
    deployed_by_id: Optional[str] = None

    def __str__(self):
        return self.model_dump_json(indent=4)

    def __repr__(self):
        return self.__str__()


class ConfigurationResponse(LifecyclesResponse):
    """Lifecycle metadata plus the configuration parameters themselves."""

    params: Dict[str, Any]


class DeploymentResponse(LifecyclesResponse):
    """Lifecycle metadata for a deployment (no extra fields)."""

    pass


class Prompt(BaseModel):
    """A complete prompt configuration for an LLM call."""

    temperature: float
    model: str
    max_tokens: int
    prompt_system: str
    prompt_user: str
    top_p: float
    frequency_penalty: float
    presence_penalty: float
# --- agenta/sdk/utils/constants.py ---
# String values treated as "true" when parsing boolean-ish settings.
TRUTHY = {"true", "1", "t", "y", "yes", "on", "enable", "enabled"}


# --- agenta/sdk/utils/costs.py ---
# https://raw.githubusercontent.com/langchain-ai/langchain/23eb480c3866db8693a3a2d63b787c898c54bb35/libs/community/langchain_community/callbacks/openai_info.py
# USD cost per 1000 tokens; "<model>-completion" keys price output tokens.
MODEL_COST_PER_1K_TOKENS = {
    # GPT-4 input
    "gpt-4": 0.03,
    "gpt-4-0314": 0.03,
    "gpt-4-0613": 0.03,
    "gpt-4-32k": 0.06,
    "gpt-4-32k-0314": 0.06,
    "gpt-4-32k-0613": 0.06,
    "gpt-4-vision-preview": 0.01,
    "gpt-4-1106-preview": 0.01,
    # GPT-4 output
    "gpt-4-completion": 0.06,
    "gpt-4-0314-completion": 0.06,
    "gpt-4-0613-completion": 0.06,
    "gpt-4-32k-completion": 0.12,
    "gpt-4-32k-0314-completion": 0.12,
    "gpt-4-32k-0613-completion": 0.12,
    "gpt-4-vision-preview-completion": 0.03,
    "gpt-4-1106-preview-completion": 0.03,
    # GPT-3.5 input
    "gpt-3.5-turbo": 0.0015,
    "gpt-3.5-turbo-0301": 0.0015,
    "gpt-3.5-turbo-0613": 0.0015,
    "gpt-3.5-turbo-1106": 0.001,
    "gpt-3.5-turbo-instruct": 0.0015,
    "gpt-3.5-turbo-16k": 0.003,
    "gpt-3.5-turbo-16k-0613": 0.003,
    # GPT-3.5 output
    "gpt-3.5-turbo-completion": 0.002,
    "gpt-3.5-turbo-0301-completion": 0.002,
    "gpt-3.5-turbo-0613-completion": 0.002,
    "gpt-3.5-turbo-1106-completion": 0.002,
    "gpt-3.5-turbo-instruct-completion": 0.002,
    "gpt-3.5-turbo-16k-completion": 0.004,
    "gpt-3.5-turbo-16k-0613-completion": 0.004,
    # Azure GPT-35 input
    "gpt-35-turbo": 0.0015,  # Azure OpenAI version of ChatGPT
    "gpt-35-turbo-0301": 0.0015,  # Azure OpenAI version of ChatGPT
    "gpt-35-turbo-0613": 0.0015,
    "gpt-35-turbo-instruct": 0.0015,
    "gpt-35-turbo-16k": 0.003,
    "gpt-35-turbo-16k-0613": 0.003,
    # Azure GPT-35 output
    "gpt-35-turbo-completion": 0.002,  # Azure OpenAI version of ChatGPT
    "gpt-35-turbo-0301-completion": 0.002,  # Azure OpenAI version of ChatGPT
    "gpt-35-turbo-0613-completion": 0.002,
    "gpt-35-turbo-instruct-completion": 0.002,
    "gpt-35-turbo-16k-completion": 0.004,
    "gpt-35-turbo-16k-0613-completion": 0.004,
    # Others
    "text-embedding-ada-002": 0.1,
    "text-ada-002": 0.1,
    "adav2": 0.1,
    "text-ada-001": 0.0004,
    "ada": 0.0004,
    "text-babbage-001": 0.0005,
    "babbage": 0.0005,
    "text-curie-001": 0.002,
    "curie": 0.002,
    "text-davinci-003": 0.02,
    "text-davinci-002": 0.02,
    "code-davinci-002": 0.02,
    # Fine Tuned input
    "babbage-002-finetuned": 0.0016,
    "davinci-002-finetuned": 0.012,
    "gpt-3.5-turbo-0613-finetuned": 0.012,
    # Fine Tuned output
    "babbage-002-finetuned-completion": 0.0016,
    "davinci-002-finetuned-completion": 0.012,
    "gpt-3.5-turbo-0613-finetuned-completion": 0.016,
    # Azure Fine Tuned input
    "babbage-002-azure-finetuned": 0.0004,
    "davinci-002-azure-finetuned": 0.002,
    "gpt-35-turbo-0613-azure-finetuned": 0.0015,
    # Azure Fine Tuned output
    "babbage-002-azure-finetuned-completion": 0.0004,
    "davinci-002-azure-finetuned-completion": 0.002,
    "gpt-35-turbo-0613-azure-finetuned-completion": 0.002,
    # Legacy fine-tuned models
    "ada-finetuned-legacy": 0.0016,
    "babbage-finetuned-legacy": 0.0024,
    "curie-finetuned-legacy": 0.012,
    "davinci-finetuned-legacy": 0.12,
}


def standardize_model_name(
    model_name: str,
    is_completion: bool = False,
) -> str:
    """
    Standardize the model name to a format that can be used in the OpenAI API.

    Args:
        model_name: Model name to standardize.
        is_completion: Whether the model is used for completion or not.
            Defaults to False.

    Returns:
        Standardized model name.
    """

    name = model_name.lower()

    # Normalize the various fine-tuning naming schemes.
    if ".ft-" in name:
        name = name.split(".ft-")[0] + "-azure-finetuned"
    if ":ft-" in name:
        name = name.split(":")[0] + "-finetuned-legacy"
    if "ft:" in name:
        name = name.split(":")[1] + "-finetuned"

    # Output tokens are priced under the "-completion" key for these families.
    is_gpt_family = name.startswith(("gpt-4", "gpt-3.5", "gpt-35"))
    is_modern_finetune = "finetuned" in name and "legacy" not in name

    if is_completion and (is_gpt_family or is_modern_finetune):
        return name + "-completion"

    return name


def get_openai_token_cost_for_model(
    model_name: str, num_tokens: int, is_completion: bool = False
) -> float:
    """
    Get the cost in USD for a given model and number of tokens.

    Args:
        model_name: Name of the model.
        num_tokens: Number of tokens.
        is_completion: Whether the model is used for completion or not.
            Defaults to False.

    Raises:
        ValueError: if the model is unknown after standardization.

    Returns:
        Cost in USD.
    """

    name = standardize_model_name(model_name, is_completion=is_completion)

    if name not in MODEL_COST_PER_1K_TOKENS:
        raise ValueError(
            f"Unknown model: {name}. Please provide a valid OpenAI model name."
            "Known models are: " + ", ".join(MODEL_COST_PER_1K_TOKENS.keys())
        )

    return MODEL_COST_PER_1K_TOKENS[name] * (num_tokens / 1000)


def calculate_token_usage(model_name: str, token_usage: dict) -> float:
    """Calculates the total cost of using a language model based on the model name and token
    usage.

    Args:
        model_name: The name of the model used to determine the cost per token.
        token_usage: Contains information about the usage of tokens for a particular model.

    Returns:
        Total cost of using a model (0 for unknown models).
    """

    completion_tokens = token_usage.get("completion_tokens", 0)
    prompt_tokens = token_usage.get("prompt_tokens", 0)

    name = standardize_model_name(model_name)
    if name not in MODEL_COST_PER_1K_TOKENS:
        # Unknown models are free rather than fatal for usage accounting.
        return 0

    completion_cost = get_openai_token_cost_for_model(
        name, completion_tokens, is_completion=True
    )
    prompt_cost = get_openai_token_cost_for_model(name, prompt_tokens)
    return prompt_cost + completion_cost


# --- agenta/sdk/utils/exceptions.py ---
# NOTE: `log` is imported from agenta.sdk.utils.logging at the top of this module.


def display_exception(message: str):
    """Log the current traceback, framed by a banner carrying `message`."""
    width = len("Agenta - ") + len(message) + len(":")
    bar = "-" * width

    log.warning(bar)
    log.warning("Agenta - %s:", message)
    log.warning(bar)
    log.warning(format_exc().strip("\n"))
    log.warning(bar)


class suppress(AbstractContextManager):  # pylint: disable=invalid-name
    """Context manager that logs and swallows any exception raised inside it."""

    def __init__(self):
        pass

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_value, exc_tb):
        if exc_type is not None:
            display_exception("Exception (suppressed)")

            # Returning True tells the interpreter not to propagate.
            return True


def handle_exceptions():
    """Decorator factory: log any exception raised by the wrapped callable,
    then re-raise it. Works for both sync and async callables."""

    def decorator(func):
        is_coroutine_function = iscoroutinefunction(func)

        @wraps(func)
        async def async_wrapper(*args, **kwargs):
            try:
                return await func(*args, **kwargs)
            except Exception as e:
                display_exception("Exception")
                raise e

        @wraps(func)
        def sync_wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                display_exception("Exception")
                raise e

        return async_wrapper if is_coroutine_function else sync_wrapper

    return decorator
# --- agenta/sdk/utils/globals.py ---
# NOTE: `ag` is `import agenta as ag` at the top of globals.py.


def set_global(config=None, tracing=None):
    """Allows usage of agenta.config and agenta.tracing in the user's code."""

    if config is not None:
        ag.config = config
    if tracing is not None:
        ag.tracing = tracing


# --- agenta/sdk/utils/helpers.py ---
import importlib.metadata


def get_current_version():
    """Returns the current version of Agenta's SDK."""

    version = importlib.metadata.version("agenta")
    return version


# --- agenta/sdk/utils/logging.py ---
import logging
from os import getenv


class Logger:
    """Thin wrapper that configures a named stdlib logger for the SDK.

    Level defaults to WARNING, bumped to DEBUG when AGENTA_DEBUG is set.
    """

    def __init__(self, name="agenta.logger", level=logging.WARNING):
        if getenv("AGENTA_DEBUG"):
            level = logging.DEBUG

        self.logger = logging.getLogger(name)
        self.logger.setLevel(level)

        # BUGFIX: logging.getLogger(name) returns a cached logger, so
        # unconditionally adding a handler on every Logger() construction
        # stacked duplicate handlers and duplicated every log line.
        if not self.logger.handlers:
            self.logger.addHandler(logging.StreamHandler())

    @property
    def log(self) -> logging.Logger:
        """The underlying stdlib logger."""
        return self.logger


log = Logger().log
# --- agenta/sdk/utils/preinit.py ---
from typing import Any, Optional


class PreInitObject:
    """Dummy object that raises an error when accessed a class before agenta.init() is called."""

    def __init__(self, name: str, destination: Optional[Any] = None) -> None:
        self._name = name

        # Mirror the real object's docstring when one is provided.
        if destination is not None:
            self.__doc__ = destination.__doc__

    def __getitem__(self, key: str) -> None:
        raise RuntimeError(
            f"You must call agenta.init() before accessing {self._name}[{key!r}]"
        )

    def __setitem__(self, key: str, value: Any) -> Any:
        raise RuntimeError(
            f"You must call agenta.init() before setting {self._name}[{key!r}]"
        )

    def __setattr__(self, key: str, value: Any) -> Any:
        # Private attributes (e.g. _name) are stored normally; anything
        # public signals that init() has not run yet.
        if key.startswith("_"):
            return object.__setattr__(self, key, value)
        raise RuntimeError(
            f"You must call agenta.init() before {self._name}[{key!r}]"
        )

    def __getattr__(self, key: str) -> Any:
        if key.startswith("_"):
            raise AttributeError
        raise RuntimeError(f"You must call agenta.init() before {self._name}.{key}")


# --- agenta/sdk/utils/singleton.py ---
from threading import Lock


class Singleton(type):
    """Metaclass giving each class exactly one shared instance.

    Construction is serialized with a lock so concurrent first calls
    cannot race and create two instances.
    """

    _instances = {}

    _lock: Lock = Lock()

    def __call__(cls, *args, **kwargs):
        with cls._lock:
            if cls not in cls._instances:
                cls._instances[cls] = super().__call__(*args, **kwargs)
            return cls._instances[cls]
# --- agenta/sdk/utils/timing.py ---
import time
from functools import wraps

# NOTE: `log` is imported from agenta.sdk.utils.logging at the top of this module.


def _format_duration(seconds: float):
    """Pick a human-friendly unit for a duration; return (value, unit)."""
    if seconds < 1e-3:
        return seconds * 1e6, "us"
    if seconds < 1:
        return seconds * 1e3, "ms"
    return seconds, "s"


def timeit(func):
    """Decorator: log how long each call to `func` took."""

    @wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)

        time_value, unit = _format_duration(time.time() - start_time)

        # Assumes method usage: args[0] is `self` when present.
        class_name = args[0].__class__.__name__ if args else None

        # Lazy %-style args avoid formatting when INFO is disabled.
        log.info(
            "'%s.%s' executed in %.4f %s.", class_name, func.__name__, time_value, unit
        )
        return result

    return wrapper


def atimeit(func):
    """Async counterpart of `timeit` for coroutine functions."""

    @wraps(func)
    async def wrapper(*args, **kwargs):
        start_time = time.time()
        result = await func(*args, **kwargs)

        time_value, unit = _format_duration(time.time() - start_time)

        class_name = args[0].__class__.__name__ if args else None

        log.info(
            "'%s.%s' executed in %.4f %s.", class_name, func.__name__, time_value, unit
        )
        return result

    return wrapper
+OPENAI_API_KEY=sk-xxxxxxx + +You can find your keys here: +https://platform.openai.com/account/api-keys + +Go back to the [Getting started tutorial](https://docs.agenta.ai/getting-started) to continue \ No newline at end of file diff --git a/services/completion-stateless-sdk/agenta/templates/compose_email/app.py b/services/completion-stateless-sdk/agenta/templates/compose_email/app.py new file mode 100644 index 0000000000..3883a31725 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/templates/compose_email/app.py @@ -0,0 +1,63 @@ +import agenta as ag +from langchain.chains import LLMChain +from langchain.llms import OpenAI +from langchain.prompts import PromptTemplate + +default_prompt = """ +**Write an email** from {from_sender} to {to_receiver} with the designated tone and style: {email_style}. The primary content of the email is: {email_content}. + +Use the following format: +Subject: + + + +**Procedure**: + +**(1) Determine the primary talking points of the email:** +1. Identify the central theme of the provided content. +2. Extract secondary messages or supporting points. + +**(2) Frame sentences for each talking point, keeping in mind the given tone and style {{ style }}:** +3. Create a compelling opening sentence that sets the tone and introduces the main theme. +4. Formulate sentences that add depth or context to each of the previously identified talking points. + +**(3) Draft the initial version of the email:** +Use the sentences crafted in the previous step to compose a coherent and engaging email. Ensure that the flow feels natural and that each sentence transitions smoothly to the next. + +**(4) Analyze the email and list ways to refine it:** +5. Identify areas where the message might be unclear or could benefit from additional information. +6. Consider places where the language or tone might be enhanced to be more persuasive or emotive. +7. Evaluate if the email adheres to the style directive and, if not, identify deviations. 
ag.init()
ag.config.default(
    temperature=ag.FloatParam(0.9), prompt_template=ag.TextParam(default_prompt)
)


@ag.entrypoint
def generate(
    from_sender: str,
    to_receiver: str,
    email_style: str,
    email_content: str,
) -> str:
    """Compose an email from `from_sender` to `to_receiver` in the requested
    style, using the configured prompt template and temperature."""
    model = OpenAI(temperature=ag.config.temperature)

    template = PromptTemplate(
        input_variables=["from_sender", "to_receiver", "email_style", "email_content"],
        template=ag.config.prompt_template,
    )

    email_chain = LLMChain(llm=model, prompt=template)

    return email_chain.run(
        from_sender=from_sender,
        to_receiver=to_receiver,
        email_style=email_style,
        email_content=email_content,
    )
b/services/completion-stateless-sdk/agenta/templates/compose_email/template.toml @@ -0,0 +1 @@ +short_desc="Simple app that composes an email." \ No newline at end of file diff --git a/services/completion-stateless-sdk/agenta/templates/extract_data_to_json/README.md b/services/completion-stateless-sdk/agenta/templates/extract_data_to_json/README.md new file mode 100644 index 0000000000..757455e2ca --- /dev/null +++ b/services/completion-stateless-sdk/agenta/templates/extract_data_to_json/README.md @@ -0,0 +1,9 @@ +# Using this template + +Please make sure to create a `.env` file with your OpenAI API key before running the app. +OPENAI_API_KEY=sk-xxxxxxx + +You can find your keys here: +https://platform.openai.com/account/api-keys + +Go back to the [Getting started tutorial](https://docs.agenta.ai/getting-started) to continue \ No newline at end of file diff --git a/services/completion-stateless-sdk/agenta/templates/extract_data_to_json/app.py b/services/completion-stateless-sdk/agenta/templates/extract_data_to_json/app.py new file mode 100644 index 0000000000..633d59d38c --- /dev/null +++ b/services/completion-stateless-sdk/agenta/templates/extract_data_to_json/app.py @@ -0,0 +1,53 @@ +import agenta as ag +from openai import OpenAI + +client = OpenAI() +import json + +default_prompt = """You are a world class algorithm for extracting information in structured formats. 
Extract information and create a valid JSON from the following input: {text}""" +function_json_string = """ +{ + "name": "extract_information", + "description": "Extract information from user-provided text", + "parameters": { + "type": "object", + "properties": { + "text": { + "type": "string", + "description": "The text to extract information from" + } + } + } +} +""" + +ag.init() +ag.config.default( + temperature=ag.FloatParam(0.9), + prompt_template=ag.TextParam(default_prompt), + function_json=ag.TextParam(function_json_string), +) + + +@ag.entrypoint +def generate( + text: str, +) -> str: + messages = [ + { + "role": "user", + "content": ag.config.prompt_template.format(text=text), + }, + ] + + function = json.loads(ag.config.function_json) + + response = client.chat.completions.create( + model="gpt-3.5-turbo-0613", + messages=messages, + temperature=ag.config.temperature, + functions=[function], + ) + + output = str(response["choices"][0]["message"]["function_call"]) + return output diff --git a/services/completion-stateless-sdk/agenta/templates/extract_data_to_json/env.example b/services/completion-stateless-sdk/agenta/templates/extract_data_to_json/env.example new file mode 100644 index 0000000000..0bd3c56d64 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/templates/extract_data_to_json/env.example @@ -0,0 +1,2 @@ +# Rename this file to .env +OPENAI_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxx diff --git a/services/completion-stateless-sdk/agenta/templates/extract_data_to_json/requirements.txt b/services/completion-stateless-sdk/agenta/templates/extract_data_to_json/requirements.txt new file mode 100644 index 0000000000..2f1e01b99f --- /dev/null +++ b/services/completion-stateless-sdk/agenta/templates/extract_data_to_json/requirements.txt @@ -0,0 +1,3 @@ +langchain +openai +agenta \ No newline at end of file diff --git a/services/completion-stateless-sdk/agenta/templates/extract_data_to_json/template.toml 
b/services/completion-stateless-sdk/agenta/templates/extract_data_to_json/template.toml new file mode 100644 index 0000000000..41a613d378 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/templates/extract_data_to_json/template.toml @@ -0,0 +1 @@ +short_desc="Simple app that extracts data to JSON from text" \ No newline at end of file diff --git a/services/completion-stateless-sdk/agenta/templates/simple_prompt/README.md b/services/completion-stateless-sdk/agenta/templates/simple_prompt/README.md new file mode 100644 index 0000000000..757455e2ca --- /dev/null +++ b/services/completion-stateless-sdk/agenta/templates/simple_prompt/README.md @@ -0,0 +1,9 @@ +# Using this template + +Please make sure to create a `.env` file with your OpenAI API key before running the app. +OPENAI_API_KEY=sk-xxxxxxx + +You can find your keys here: +https://platform.openai.com/account/api-keys + +Go back to the [Getting started tutorial](https://docs.agenta.ai/getting-started) to continue \ No newline at end of file diff --git a/services/completion-stateless-sdk/agenta/templates/simple_prompt/app.py b/services/completion-stateless-sdk/agenta/templates/simple_prompt/app.py new file mode 100644 index 0000000000..8cb809ee55 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/templates/simple_prompt/app.py @@ -0,0 +1,24 @@ +import agenta as ag +from langchain.chains import LLMChain +from langchain.llms import OpenAI +from langchain.prompts import PromptTemplate + +default_prompt = "What is a good name for a company that makes {product}?" 
+ +ag.init() +ag.config.default( + temperature=ag.FloatParam(0.9), + prompt_template=ag.TextParam(default_prompt), +) + + +@ag.entrypoint +def generate(product: str) -> str: + llm = OpenAI(temperature=ag.config.temperature) + prompt = PromptTemplate( + input_variables=["product"], template=ag.config.prompt_template + ) + chain = LLMChain(llm=llm, prompt=prompt) + output = chain.run(product=product) + + return output diff --git a/services/completion-stateless-sdk/agenta/templates/simple_prompt/env.example b/services/completion-stateless-sdk/agenta/templates/simple_prompt/env.example new file mode 100644 index 0000000000..0bd3c56d64 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/templates/simple_prompt/env.example @@ -0,0 +1,2 @@ +# Rename this file to .env +OPENAI_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxx diff --git a/services/completion-stateless-sdk/agenta/templates/simple_prompt/requirements.txt b/services/completion-stateless-sdk/agenta/templates/simple_prompt/requirements.txt new file mode 100644 index 0000000000..2f1e01b99f --- /dev/null +++ b/services/completion-stateless-sdk/agenta/templates/simple_prompt/requirements.txt @@ -0,0 +1,3 @@ +langchain +openai +agenta \ No newline at end of file diff --git a/services/completion-stateless-sdk/agenta/templates/simple_prompt/template.toml b/services/completion-stateless-sdk/agenta/templates/simple_prompt/template.toml new file mode 100644 index 0000000000..6b1d9b21c1 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/templates/simple_prompt/template.toml @@ -0,0 +1 @@ +short_desc="Simple app that uses one prompt using langchain" \ No newline at end of file diff --git a/services/completion-stateless-sdk/agenta/tests/__init__.py b/services/completion-stateless-sdk/agenta/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/new_baby_name.py 
b/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/new_baby_name.py new file mode 100644 index 0000000000..41b4577af6 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/new_baby_name.py @@ -0,0 +1,46 @@ +import agenta as ag +from agenta import FloatParam, TextParam +from openai import OpenAI +from pydantic import BaseModel, Field + +client = OpenAI() +import os + +default_prompt = ( + "Give me 10 names for a baby from this country {country} with gender {gender}!!!!" +) + +ag.init() + + +class BabyConfig(BaseModel): + temperature: float = Field(default=0.2) + prompt_template: str = Field(default=default_prompt) + + +@ag.route("/", config_schema=BabyConfig) +def generate(country: str, gender: str) -> str: + """ + Generate a baby name based on the given country and gender. + + Args: + country (str): The country to generate the name from. + gender (str): The gender of the baby. + + Returns: + str: The generated baby name. 
+ """ + config = ag.ConfigManager.get_from_route(schema=BabyConfig) + prompt = config.prompt_template.format(country=country, gender=gender) + + chat_completion = client.chat.completions.create( + model="gpt-3.5-turbo", + messages=[{"role": "user", "content": prompt}], + temperature=config.temperature, + ) + token_usage = chat_completion.usage.dict() + return { + "message": chat_completion.choices[0].message.content, + **{"usage": token_usage}, + "cost": ag.calculate_token_usage("gpt-3.5-turbo", token_usage), + } diff --git a/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/new_chat_prompt.py b/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/new_chat_prompt.py new file mode 100644 index 0000000000..c752f4a713 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/new_chat_prompt.py @@ -0,0 +1,103 @@ +from typing import Annotated + +import agenta as ag +import litellm +from agenta.sdk.assets import supported_llm_models +from pydantic import BaseModel, Field + +litellm.drop_params = True + + +prompts = { + "system_prompt": "You are an expert in geography.", + "user_prompt": """What is the capital of {country}?""", +} + +GPT_FORMAT_RESPONSE = ["gpt-3.5-turbo-1106", "gpt-4-1106-preview"] + + +ag.init(config_fname="config.toml") + + +class MyConfig(BaseModel): + temperature: float = Field(default=1, ge=0.0, le=2.0) + model: Annotated[str, ag.MultipleChoice(choices=supported_llm_models)] = Field( + default="gpt-3.5-turbo" + ) + max_tokens: int = Field(default=-1, ge=-1, le=4000) + prompt_system: str = Field(default=prompts["system_prompt"]) + prompt_user: str = Field(default=prompts["user_prompt"]) + top_p: float = Field(default=1) + frequence_penalty: float = Field(default=0.0, ge=-2.0, le=2.0) + presence_penalty: float = Field(default=0.0, ge=-2.0, le=2.0) + force_json: bool = Field(default=False) + + 
+@ag.instrument(spankind="llm") +async def llm_call(prompt_system: str, prompt_user: str): + config = ag.ConfigManager.get_from_route(schema=MyConfig) + response_format = ( + {"type": "json_object"} + if config.force_json and config.model in GPT_FORMAT_RESPONSE + else {"type": "text"} + ) + + max_tokens = config.max_tokens if config.max_tokens != -1 else None + + # Include frequency_penalty and presence_penalty only if supported + completion_params = {} + if config.model in GPT_FORMAT_RESPONSE: + completion_params["frequency_penalty"] = config.frequence_penalty + completion_params["presence_penalty"] = config.presence_penalty + + response = await litellm.acompletion( + **{ + "model": config.model, + "messages": [ + {"content": prompt_system, "role": "system"}, + {"content": prompt_user, "role": "user"}, + ], + "temperature": config.temperature, + "max_tokens": max_tokens, + "top_p": config.top_p, + "response_format": response_format, + **completion_params, + } + ) + token_usage = response.usage.dict() + return { + "message": response.choices[0].message.content, + "usage": token_usage, + "cost": litellm.cost_calculator.completion_cost( + completion_response=response, model=config.model + ), + } + + +@ag.route("/", config_schema=MyConfig) +@ag.instrument() +async def generate( + inputs: ag.DictInput = ag.DictInput(default_keys=["country"]), +): + config = ag.ConfigManager.get_from_route(schema=MyConfig) + try: + prompt_user = config.prompt_user.format(**inputs) + except Exception as e: + prompt_user = config.prompt_user + try: + prompt_system = config.prompt_system.format(**inputs) + except Exception as e: + prompt_system = config.prompt_system + + # SET MAX TOKENS - via completion() + if config.force_json and config.model not in GPT_FORMAT_RESPONSE: + raise ValueError( + "Model {} does not support JSON response format".format(config.model) + ) + + response = await llm_call(prompt_system=prompt_system, prompt_user=prompt_user) + return { + "message": 
response["message"], + "usage": response.get("usage", None), + "cost": response.get("cost", None), + } diff --git a/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/new_single_prompt.py b/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/new_single_prompt.py new file mode 100644 index 0000000000..ea4ad8e45d --- /dev/null +++ b/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/new_single_prompt.py @@ -0,0 +1,61 @@ +from typing import Annotated, Any, Dict, List + +import agenta as ag +import litellm +from agenta.sdk.assets import supported_llm_models +from pydantic import BaseModel, Field + +litellm.drop_params = True + +SYSTEM_PROMPT = "You have expertise in offering technical ideas to startups." + +ag.init() + + +class MyConfig(BaseModel): + temperature: float = Field(default=0.2, le=1, ge=0) + model: Annotated[str, ag.MultipleChoice(choices=supported_llm_models)] = Field( + default="gpt-3.5-turbo" + ) + max_tokens: int = Field(default=-1, ge=-1, le=4000) + prompt_system: str = Field(default=SYSTEM_PROMPT) + multiselect: Annotated[str, ag.MultipleChoice(choices=["a", "b", "c"])] = Field( + default="a" + ) + + +@ag.route("/llm_call", config_schema=MyConfig) +@ag.instrument(spankind="llm") +async def llm_call(messages: List[Dict[str, Any]], maxtokens): + config = ag.ConfigManager.get_from_route(schema=MyConfig) + chat_completion = await litellm.acompletion( + model=config.model, + messages=messages, + temperature=config.temperature, + max_tokens=maxtokens, + ) + token_usage = chat_completion.usage.dict() + return { + "usage": token_usage, + "message": chat_completion.choices[0].message.content, + "cost": litellm.cost_calculator.completion_cost( + completion_response=chat_completion, model=config.model + ), + } + + +@ag.route("/", config_schema=MyConfig) +@ag.instrument() +async def chat(inputs: ag.MessagesInput = ag.MessagesInput()) -> 
Dict[str, Any]: + config = ag.ConfigManager.get_from_route(schema=MyConfig) + messages = [{"role": "system", "content": config.prompt_system}] + inputs + max_tokens = config.max_tokens if config.max_tokens != -1 else None + response = await llm_call( + messages=messages, + maxtokens=max_tokens, + ) + return { + "message": response["message"], + "usage": response.get("usage", None), + "cost": response.get("cost", None), + } diff --git a/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/requirements.txt b/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/requirements.txt new file mode 100644 index 0000000000..6813723fcd --- /dev/null +++ b/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/requirements.txt @@ -0,0 +1,3 @@ +agenta +openai +litellm \ No newline at end of file diff --git a/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/run_local.py b/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/run_local.py new file mode 100644 index 0000000000..94c52aa381 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/run_local.py @@ -0,0 +1,13 @@ +from uvicorn import run +import agenta +import new_chat_prompt # This will register the routes with the FastAPI application +import os + +try: + import ingest +except ImportError: + pass + + +if __name__ == "__main__": + run("agenta:app", host="0.0.0.0", port=80) diff --git a/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/v3.py b/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/v3.py new file mode 100644 index 0000000000..5bbbe17ffd --- /dev/null +++ b/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_new_sdk/v3.py @@ -0,0 +1,53 @@ 
+import os +from typing import Annotated + +import agenta as ag +from agenta.sdk.managers.config import ConfigManager +from pydantic import BaseModel, Field + +os.environ["AGENTA_MODE"] = "true" + +default_prompt = ( + "Give me 10 names for a baby from this country {country} with gender {gender}!!!!" +) + +ag.init() + + +class NestConfig(BaseModel): + some_param: str = "hello" + + +class MyConfigSchema(BaseModel): # <- the app + prompt_1: ag.Prompt = ag.Prompt(prompt_system="hello") + prompt_2: ag.Prompt = ag.Prompt(prompt_system="hello") + nest_config: NestConfig = NestConfig() + + +@ag.route( + path="/", config_schema=MyConfigSchema, is_active=os.environ.get("AGENTA_MODE") +) +def rag(country: str, gender: str) -> str: + """ + Generate a baby name based on the given country and gender. + + Args: + country (str): The country to generate the name from. + gender (str): The gender of the baby. + + Returns: + str: The generated baby name.` + """ + if os.environ.get("AGENTA_MODE") == "true": + config = ConfigManager.get_from_route(schema=MyConfigSchema) + else: + config = ConfigManager.get_from_registry( + schema=MyConfigSchema, environment="production" + ) + prompt = config.pro.format(country=country, gender=gender) + + return f"mock output for {prompt}" + + +if __name__ == "__main__": + rag(country="USA", gender="male") diff --git a/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/baby_name.py b/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/baby_name.py new file mode 100644 index 0000000000..41dd9937f5 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/baby_name.py @@ -0,0 +1,39 @@ +import agenta as ag +from agenta import FloatParam, TextParam +from openai import OpenAI + +client = OpenAI() + +default_prompt = ( + "Give me 10 names for a baby from this country {country} with gender {gender}!!!!" 
+) + +ag.init(config_fname="config.toml") +ag.config.default( + temperature=FloatParam(0.2), prompt_template=TextParam(default_prompt) +) + + +@ag.entrypoint +def generate(country: str, gender: str) -> str: + """ + Generate a baby name based on the given country and gender. + + Args: + country (str): The country to generate the name from. + gender (str): The gender of the baby. + + Returns: + str: The generated baby name. + """ + prompt = ag.config.prompt_template.format(country=country, gender=gender) + + chat_completion = client.chat.completions.create( + model="gpt-3.5-turbo", messages=[{"role": "user", "content": prompt}] + ) + token_usage = chat_completion.usage.dict() + return { + "message": chat_completion.choices[0].message.content, + **{"usage": token_usage}, + "cost": ag.calculate_token_usage("gpt-3.5-turbo", token_usage), + } diff --git a/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/chat_prompt.py b/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/chat_prompt.py new file mode 100644 index 0000000000..e6a0826c93 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/chat_prompt.py @@ -0,0 +1,96 @@ +import agenta as ag +import litellm +from agenta.sdk.assets import supported_llm_models + +litellm.drop_params = True + + +prompts = { + "system_prompt": "You are an expert in geography.", + "user_prompt": """What is the capital of {country}?""", +} + +GPT_FORMAT_RESPONSE = ["gpt-3.5-turbo-1106", "gpt-4-1106-preview"] + + +ag.init() +ag.config.default( + temperature=ag.FloatParam(default=1, minval=0.0, maxval=2.0), + model=ag.GroupedMultipleChoiceParam( + default="gpt-3.5-turbo", choices=supported_llm_models + ), + max_tokens=ag.IntParam(-1, -1, 4000), + prompt_system=ag.TextParam(prompts["system_prompt"]), + prompt_user=ag.TextParam(prompts["user_prompt"]), + top_p=ag.FloatParam(1), + 
frequence_penalty=ag.FloatParam(default=0.0, minval=-2.0, maxval=2.0), + presence_penalty=ag.FloatParam(default=0.0, minval=-2.0, maxval=2.0), + force_json=ag.BinaryParam(False), +) + + +@ag.instrument(spankind="llm") +async def llm_call(prompt_system: str, prompt_user: str): + response_format = ( + {"type": "json_object"} + if ag.config.force_json and ag.config.model in GPT_FORMAT_RESPONSE + else {"type": "text"} + ) + max_tokens = ag.config.max_tokens if ag.config.max_tokens != -1 else None + + # Include frequency_penalty and presence_penalty only if supported + completion_params = {} + if ag.config.model in GPT_FORMAT_RESPONSE: + completion_params["frequency_penalty"] = ag.config.frequence_penalty + completion_params["presence_penalty"] = ag.config.presence_penalty + + response = await litellm.acompletion( + **{ + "model": ag.config.model, + "messages": [ + {"content": prompt_system, "role": "system"}, + {"content": prompt_user, "role": "user"}, + ], + "temperature": ag.config.temperature, + "max_tokens": max_tokens, + "top_p": ag.config.top_p, + "response_format": response_format, + **completion_params, + } + ) + token_usage = response.usage.dict() + return { + "message": response.choices[0].message.content, + "usage": token_usage, + "cost": litellm.cost_calculator.completion_cost( + completion_response=response, model=ag.config.model + ), + } + + +@ag.entrypoint +@ag.instrument() +async def generate( + inputs: ag.DictInput = ag.DictInput(default_keys=["country"]), +): + try: + prompt_user = ag.config.prompt_user.format(**inputs) + except Exception as e: + prompt_user = ag.config.prompt_user + try: + prompt_system = ag.config.prompt_system.format(**inputs) + except Exception as e: + prompt_system = ag.config.prompt_system + + # SET MAX TOKENS - via completion() + if ag.config.force_json and ag.config.model not in GPT_FORMAT_RESPONSE: + raise ValueError( + "Model {} does not support JSON response format".format(ag.config.model) + ) + + response = await 
llm_call(prompt_system=prompt_system, prompt_user=prompt_user) + return { + "message": response["message"], + "usage": response.get("usage", None), + "cost": response.get("cost", None), + } diff --git a/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/requirements.txt b/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/requirements.txt new file mode 100644 index 0000000000..6813723fcd --- /dev/null +++ b/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/requirements.txt @@ -0,0 +1,3 @@ +agenta +openai +litellm \ No newline at end of file diff --git a/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/run_local.py b/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/run_local.py new file mode 100644 index 0000000000..65c782f3af --- /dev/null +++ b/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/run_local.py @@ -0,0 +1,13 @@ +from uvicorn import run +import agenta +import baby_name # This will register the routes with the FastAPI application +import os + +try: + import ingest +except ImportError: + pass + + +if __name__ == "__main__": + run("agenta:app", host="0.0.0.0", port=80) diff --git a/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/single_prompt.py b/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/single_prompt.py new file mode 100644 index 0000000000..02dd81cb6a --- /dev/null +++ b/services/completion-stateless-sdk/agenta/tests/management_sdk/manual_tests/apps_with_old_sdk/single_prompt.py @@ -0,0 +1,53 @@ +from typing import Dict, Any, List +import agenta as ag +import litellm +from agenta.sdk.assets import supported_llm_models + +litellm.drop_params = True + +SYSTEM_PROMPT = "You have expertise in offering 
technical ideas to startups." + +ag.init() +ag.config.default( + temperature=ag.FloatParam(0.2), + model=ag.GroupedMultipleChoiceParam( + default="gpt-3.5-turbo", choices=supported_llm_models + ), + max_tokens=ag.IntParam(-1, -1, 4000), + prompt_system=ag.TextParam(SYSTEM_PROMPT), + multiselect=ag.MultipleChoiceParam(choices=["a", "b", "c"]), +) + + +@ag.instrument(spankind="llm") +async def llm_call(messages: List[Dict[str, Any]], max_tokens: int): + chat_completion = await litellm.acompletion( + model=ag.config.model, + messages=messages, + temperature=ag.config.temperature, + max_tokens=max_tokens, + ) + token_usage = chat_completion.usage.dict() + return { + "usage": token_usage, + "message": chat_completion.choices[0].message.content, + "cost": litellm.cost_calculator.completion_cost( + completion_response=chat_completion, model=ag.config.model + ), + } + + +@ag.entrypoint +@ag.instrument() +async def chat(inputs: ag.MessagesInput = ag.MessagesInput()) -> Dict[str, Any]: + messages = [{"role": "system", "content": ag.config.prompt_system}] + inputs + max_tokens = ag.config.max_tokens if ag.config.max_tokens != -1 else None + response = await llm_call( + messages=messages, + max_tokens=max_tokens, + ) + return { + "message": response["message"], + "usage": response.get("usage", None), + "cost": response.get("cost", None), + } diff --git a/services/completion-stateless-sdk/agenta/tests/management_sdk/run_local.py b/services/completion-stateless-sdk/agenta/tests/management_sdk/run_local.py new file mode 100644 index 0000000000..75266e1443 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/tests/management_sdk/run_local.py @@ -0,0 +1,13 @@ +from uvicorn import run +import agenta +import v3 # This will register the routes with the FastAPI application +import os + +try: + import ingest +except ImportError: + pass + + +if __name__ == "__main__": + run("agenta:app", host="0.0.0.0", port=80) diff --git 
a/services/completion-stateless-sdk/agenta/tests/management_sdk/v1.py b/services/completion-stateless-sdk/agenta/tests/management_sdk/v1.py new file mode 100644 index 0000000000..e7b4018201 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/tests/management_sdk/v1.py @@ -0,0 +1,61 @@ +from enum import Enum +from typing import List, Union, Dict +from dataclasses import dataclass +import pdb +import agenta as ag +from agenta import FloatParam, TextParam +from pydantic import BaseModel, Field +from typing import Annotated +from typing import Literal + +default_prompt = ( + "Give me 10 names for a baby from this country {country} with gender {gender}!!!!" +) + +ag.init(config_fname="config.toml") + +# To add to our types + + +class MyConfig(BaseModel): + prompt_template: str = Field(default=default_prompt) + bool_param: bool = Field(default=True) + int_param: int = Field(default=1, ge=1, le=5) + float_param: float = Field(default=1.0, gt=0, lt=10) + multiple: Annotated[str, ag.MultipleChoice(["gpt-3", "gpt-5"])] = Field( + default="gpt3" + ) + # multiple: Literal["gpt-3", "gpt-5"] = Field(default="gpt-3") + grouped_multiple: Annotated[ + str, + ag.MultipleChoice({"openai": ["gpt-3", "gpt-5"], "azure": ["gpt-5", "gpt-3"]}), + ] = Field(default="gpt3") + + +config = MyConfig(prompt_template=default_prompt) + + +@ag.instrument() +def retriever(query: str) -> str: + return "mock output for " + query + + +@ag.entrypoint(config) +def generate(country: str, gender: str) -> str: + """ + Generate a baby name based on the given country and gender. + + Args: + country (str): The country to generate the name from. + gender (str): The gender of the baby. 
+ + Returns: + str: The generated baby name.` + """ + prompt = config.prompt_template.format(country=country, gender=gender) + + return { + "message": f"mock output for {prompt}", + **{"usage": {"prompt_tokens": 10, "completion_tokens": 10, "total_tokens": 20}}, + "cost": 0.01, + } diff --git a/services/completion-stateless-sdk/agenta/tests/management_sdk/v2.py b/services/completion-stateless-sdk/agenta/tests/management_sdk/v2.py new file mode 100644 index 0000000000..718cfbe42c --- /dev/null +++ b/services/completion-stateless-sdk/agenta/tests/management_sdk/v2.py @@ -0,0 +1,62 @@ +from enum import Enum +from typing import List, Union, Dict +from dataclasses import dataclass +import pdb +import agenta as ag +from agenta import FloatParam, TextParam +from pydantic import BaseModel, Field +from typing import Annotated +from typing import Literal + +# AGENTA_MODE = TRUE +default_prompt = ( + "Give me 10 names for a baby from this country {country} with gender {gender}!!!!" +) + +ag.init(config_fname="config.toml") + +# To add to our types +# Option 1 + + +class MyConfigSchema(BaseModel): # <- the app + prompt_template: str = Field(default=default_prompt) + bool_param: bool = Field(default=True) + int_param: int = Field(default=1, ge=1, le=5) + float_param: float = Field(default=1.0, gt=0, lt=10) + multiple: Annotated[str, ag.MultipleChoice(["gpt-3", "gpt-5"])] = Field( + default="gpt3" + ) + # multiple: Literal["gpt-3", "gpt-5"] = Field(default="gpt-3") + grouped_multiple: Annotated[ + str, + ag.MultipleChoice({"openai": ["gpt-3", "gpt-5"], "azure": ["gpt-5", "gpt-3"]}), + ] = Field(default="gpt3") + + class Settings: + app_name: str = "myapp" + + +config = MyConfigSchema() + + +@ag.route(path="/", config=config) # side effects +def generate(country: str, gender: str) -> str: + """ + Generate a baby name based on the given country and gender. + + Args: + country (str): The country to generate the name from. + gender (str): The gender of the baby. 
+ + Returns: + str: The generated baby name.` + """ + + prompt = config.prompt_template.format(country=country, gender=gender) + + return { + "message": f"mock output for {prompt}", + **{"usage": {"prompt_tokens": 10, "completion_tokens": 10, "total_tokens": 20}}, + "cost": 0.01, + } diff --git a/services/completion-stateless-sdk/agenta/tests/management_sdk/v4_prompt.py b/services/completion-stateless-sdk/agenta/tests/management_sdk/v4_prompt.py new file mode 100644 index 0000000000..08b581f8ab --- /dev/null +++ b/services/completion-stateless-sdk/agenta/tests/management_sdk/v4_prompt.py @@ -0,0 +1,80 @@ +from enum import Enum +from typing import List, Union, Dict +from dataclasses import dataclass +import pdb +import agenta as ag +from agenta.sdk.managers.config import ConfigManager +from agenta import FloatParam, TextParam +from pydantic import BaseModel, Field +from typing import Annotated +from typing import Literal + + +# AGENTA_MODE = TRUE +default_prompt = ( + "Give me 10 names for a baby from this country {country} with gender {gender}!!!!" 
+) + +ag.init(config_fname="config.toml") + +# To add to our types +# Option 1 + + +# class MyConfigSchema(BaseModel): # <- the app +# prompt_template: str = Field(default=default_prompt) +# bool_param: bool = Field(default=True) +# int_param: int = Field(default=1, ge=1, le=5) +# float_param: float = Field(default=1.0, gt=0, lt=10) +# multiple: Annotated[str, ag.MultipleChoice(["gpt-3", "gpt-5"])] = Field(default="gpt3") +# # multiple: Literal["gpt-3", "gpt-5"] = Field(default="gpt-3") +# grouped_multiple: Annotated[str, ag.MultipleChoice({"openai": ["gpt-3", "gpt-5"], "azure": ["gpt-5", "gpt-3"]})] = Field(default="gpt3") + +# class Settings: +# app_name: str = 'myapp' + + +class Prompt(BaseModel): + prompt_template: str = Field(default=default_prompt) + bool_param: bool = Field(default=True) + int_param: int = Field(default=1, ge=1, le=5) + float_param: float = Field(default=1.0, gt=0, lt=10) + grouped_multiple: Annotated[ + str, + ag.MultipleChoice({"openai": ["gpt-3", "gpt-5"], "azure": ["gpt-5", "gpt-3"]}), + ] = Field(default="gpt3") + + +class MyConfigSchema(BaseModel): # <- the app + prompt: Prompt = Field(default=Prompt()) + + class settings: + app_name: str = "myapp" + + +@ag.route(path="/", config_schema=MyConfigSchema) +def rag(country: str, gender: str) -> str: + """ + Generate a baby name based on the given country and gender. + + Args: + country (str): The country to generate the name from. + gender (str): The gender of the baby. 
+ + Returns: + str: The generated baby name.` + """ + # if os.environ.get("AGENTA_CLOUD"): + # config = ag.ConfigLoader.from_route(MyConfigSchema) + # config = ag.ConfigLoader.from_backend(MyConfigSchema) + # config = ag.ConfigLoader.from_file(MyConfigSchema) + # config = ConfigManager.from_route(MyConfigSchema) + config = MyConfigSchema() + # config = ConfigManager.from_backend(MyConfigSchema) + prompt = config.prompt.prompt_template.format(country=country, gender=gender) + + return { + "message": f"mock output for {prompt}", + **{"usage": {"prompt_tokens": 10, "completion_tokens": 10, "total_tokens": 20}}, + "cost": 0.01, + } diff --git a/services/completion-stateless-sdk/agenta/tests/prompt_sdk/conftest.py b/services/completion-stateless-sdk/agenta/tests/prompt_sdk/conftest.py new file mode 100644 index 0000000000..acfc4d80ef --- /dev/null +++ b/services/completion-stateless-sdk/agenta/tests/prompt_sdk/conftest.py @@ -0,0 +1,40 @@ +from typing import List, Dict, Any + +import pytest +from pydantic import BaseModel + + +class Prompt(BaseModel): + temperature: float + model: str + max_tokens: int + messages: List[Dict[str, Any]] + top_p: float + frequency_penalty: float + presence_penalty: float + + +class Parameters(BaseModel): + temperature: float + model: str + max_tokens: int + + +@pytest.fixture +def prompt(): + # Sample Prompt object to use in tests + return Prompt( + temperature=0.6, + model="gpt-3.5-turbo", + max_tokens=150, + messages=[ + { + "role": "system", + "content": "You are an assistant that provides concise answers", + }, + {"role": "user", "content": "Explain {topic} in simple terms"}, + ], + top_p=1.0, + frequency_penalty=0.0, + presence_penalty=0.0, + ) diff --git a/services/completion-stateless-sdk/agenta/tests/prompt_sdk/test_client.py b/services/completion-stateless-sdk/agenta/tests/prompt_sdk/test_client.py new file mode 100644 index 0000000000..378fe2e7a0 --- /dev/null +++ 
b/services/completion-stateless-sdk/agenta/tests/prompt_sdk/test_client.py @@ -0,0 +1,73 @@ +from unittest.mock import patch + +import pytest +from agenta.sdk.client import Agenta + + +@pytest.fixture +def agenta_client(): + # Set up the Agenta client with a mock API key + with patch.dict( + "os.environ", + {"AGENTA_API_KEY": "mock_api_key", "AGENTA_HOST": "https://mock.agenta.ai"}, + ): + client = Agenta() + return client + + +def test_get_config_with_caching(agenta_client): + """ + Test the caching mechanism of the get_config method to ensure it returns cached data. + + Args: + agenta_client: The fixture providing an instance of the Agenta client. + """ + # Setup the mock to return a predefined configuration + with patch.object( + agenta_client.client.configs, + "get_config", + return_value={"parameters": "something"}, + ) as mock_get_config: + # Retrieve configuration to store in cache + response = agenta_client.get_config("base123", "production") + assert response == { + "parameters": "something" + }, "First response should match the mock data." + + # Modify the return value of the mock + mock_get_config.return_value = {"parameters": "something else"} + + # Attempt to retrieve configuration again, expecting cached data + response = agenta_client.get_config("base123", "production") + assert response == { + "parameters": "something" + }, "Second response should return cached data, not new mock data." + + +def test_get_config_without_caching(agenta_client): + """ + Test the get_config method without caching to ensure it always fetches new data. + + Args: + agenta_client: The fixture providing an instance of the Agenta client. 
+ """ + # Setup the mock to return a predefined configuration + with patch.object( + agenta_client.client.configs, + "get_config", + return_value={"parameters": "something"}, + ) as mock_get_config: + # Retrieve configuration with caching disabled + response = agenta_client.get_config("base123", "production", cache_timeout=0) + assert response == { + "parameters": "something" + }, "First response should match the mock data." + + # Modify the return value of the mock + mock_get_config.return_value = {"parameters": "something else"} + + # Retrieve new configuration with caching disabled + response = agenta_client.get_config("base123", "production", cache_timeout=0) + assert response == { + "parameters": "something else" + }, "Second response should match the new mock data." diff --git a/services/completion-stateless-sdk/agenta/tests/prompt_sdk/test_config_manager.py b/services/completion-stateless-sdk/agenta/tests/prompt_sdk/test_config_manager.py new file mode 100644 index 0000000000..7a04ae2620 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/tests/prompt_sdk/test_config_manager.py @@ -0,0 +1,182 @@ +import json +from typing import Annotated +from unittest.mock import patch + +import yaml # type: ignore +import pytest +from pydantic import BaseModel, Field + +import agenta as ag +from agenta.tests.prompt_sdk.conftest import Parameters +from agenta.sdk.managers.config import ConfigManager + + +# AGENTA_MODE = TRUE +default_prompt = ( + "Give me 10 names for a baby from this country {country} with gender {gender}!!!!" 
+) + + +# To add to our types +# Option 1 + + +class MyConfigSchema(BaseModel): # <- the app + prompt_template: str = Field(default=default_prompt) + bool_param: bool = Field(default=True) + int_param: int = Field(default=1, ge=1, le=5) + float_param: float = Field(default=1.0, gt=0, lt=10) + multiple: Annotated[str, ag.MultipleChoice(["gpt-3", "gpt-5"])] = Field( + default="gpt3" + ) + # multiple: Literal["gpt-3", "gpt-5"] = Field(default="gpt-3") + grouped_multiple: Annotated[ + str, + ag.MultipleChoice({"openai": ["gpt-3", "gpt-5"], "azure": ["gpt-5", "gpt-3"]}), + ] = Field(default="gpt3") + + class Settings: + app_name: str = "myapp" + + +@pytest.fixture +def sample_config(): + return { + "prompt_template": "Custom prompt: {country} {gender}", + "bool_param": False, + "int_param": 3, + "float_param": 5.5, + "multiple": "gpt-5", + "grouped_multiple": "gpt-5", + } + + +@pytest.fixture +def yaml_config_file(tmp_path, sample_config): + file_path = tmp_path / "test_config.yaml" + with open(file_path, "w") as f: + yaml.dump(sample_config, f) + return file_path + + +@pytest.fixture +def json_config_file(tmp_path, sample_config): + file_path = tmp_path / "test_config.json" + with open(file_path, "w") as f: + json.dump(sample_config, f) + return file_path + + +def test_get_from_yaml(yaml_config_file): + config = ConfigManager.get_from_yaml(str(yaml_config_file), MyConfigSchema) + assert isinstance(config, MyConfigSchema) + assert config.prompt_template == "Custom prompt: {country} {gender}" + assert config.bool_param is False + assert config.int_param == 3 + assert config.float_param == 5.5 + assert config.multiple == "gpt-5" + assert config.grouped_multiple == "gpt-5" + + +def test_get_from_json(json_config_file): + config = ConfigManager.get_from_json(str(json_config_file), MyConfigSchema) + assert isinstance(config, MyConfigSchema) + assert config.prompt_template == "Custom prompt: {country} {gender}" + assert config.bool_param is False + assert config.int_param == 
3 + assert config.float_param == 5.5 + assert config.multiple == "gpt-5" + assert config.grouped_multiple == "gpt-5" + + +def test_get_from_yaml_file_not_found(): + with pytest.raises(FileNotFoundError): + ConfigManager.get_from_yaml("non_existent_file.yaml", MyConfigSchema) + + +def test_get_from_json_file_not_found(): + with pytest.raises(FileNotFoundError): + ConfigManager.get_from_json("non_existent_file.json", MyConfigSchema) + + +@patch("agenta.ConfigManager.get_from_registry") +def test_fetch_configuration_and_return_dict(mock_get_config): + # Mock the API response for fetching configuration + + mock_get_config.return_value = { + "temperature": 0.9, + "model": "gpt-3.5-turbo", + "max_tokens": 100, + } + + config = ConfigManager.get_from_registry( + app_slug="my-app", variant_slug="new-variant", variant_version=2 + ) + + assert isinstance(config, dict) + assert config["temperature"] == 0.9 + assert config["model"] == "gpt-3.5-turbo" + assert config["max_tokens"] == 100 + + +@patch("agenta.ConfigManager.get_from_registry") +def test_fetch_configuration_and_return_schema(mock_get_config): + # Mock the API response for fetching configuration + + mock_get_config.return_value = Parameters( + temperature=0.9, model="gpt-3.5-turbo", max_tokens=100 + ) + + config_as_schema = ConfigManager.get_from_registry( + schema=Parameters, + app_slug="my-app", + variant_slug="new-variant", + variant_version=2, + ) + + assert isinstance(config_as_schema, Parameters) + assert config_as_schema.temperature == 0.9 + assert config_as_schema.model == "gpt-3.5-turbo" + assert config_as_schema.max_tokens == 100 + + +@pytest.mark.asyncio +@patch("agenta.ConfigManager.aget_from_registry") +async def test_afetch_configuration_and_return_dict(mock_aget_config): + # Mock the API response for fetching configuration + + mock_aget_config.return_value = { + "temperature": 0.9, + "model": "gpt-3.5-turbo", + "max_tokens": 100, + } + + config = await ConfigManager.aget_from_registry( + 
app_slug="my-app", variant_slug="new-variant", variant_version=2 + ) + + assert config["temperature"] == 0.9 + assert config["model"] == "gpt-3.5-turbo" + assert config["max_tokens"] == 100 + + +@pytest.mark.asyncio +@patch("agenta.ConfigManager.aget_from_registry") +async def test_afetch_configuration_and_return_schema(mock_aget_config): + # Mock the API response for fetching configuration + + mock_aget_config.return_value = Parameters( + temperature=0.9, model="gpt-3.5-turbo", max_tokens=100 + ) + + config_as_schema = await ConfigManager.aget_from_registry( + schema=Parameters, + app_slug="my-app", + variant_slug="new-variant", + variant_version=2, + ) + + assert isinstance(config_as_schema, Parameters) + assert config_as_schema.temperature == 0.9 + assert config_as_schema.model == "gpt-3.5-turbo" + assert config_as_schema.max_tokens == 100 diff --git a/services/completion-stateless-sdk/agenta/tests/prompt_sdk/test_deployment_manager.py b/services/completion-stateless-sdk/agenta/tests/prompt_sdk/test_deployment_manager.py new file mode 100644 index 0000000000..2edd37bf83 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/tests/prompt_sdk/test_deployment_manager.py @@ -0,0 +1,88 @@ +from unittest.mock import patch + +import pytest + +from agenta.sdk.managers import DeploymentManager +from agenta.sdk.managers.shared import DeploymentResponse + + +@patch("agenta.DeploymentManager.deploy") +def test_deploy_variant(mock_deploy): + # Mock the API response for deploying a variant + mock_deploy.return_value = DeploymentResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-variant", + "variant_version": 2, + "environment_slug": "staging", + "deployed_at": "2023-10-02T12:30:00Z", + "deployed_by": "user@example.com", + } + ) + + deployment = DeploymentManager.deploy( + app_slug="my-app", + variant_slug="new-variant", + environment_slug="staging", + variant_version=None, + ) + + assert deployment.environment_slug == "staging" + assert 
deployment.deployed_by == "user@example.com" + + +@pytest.mark.asyncio +@patch("agenta.DeploymentManager.adeploy") +async def test_adeploy_variant(mock_adeploy): + # Mock the API response for deploying a variant + mock_adeploy.return_value = DeploymentResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-variant", + "variant_version": 8, + "environment_slug": "production", + "deployed_at": "2023-10-02T12:30:00Z", + "deployed_by": "abc@example.com", + } + ) + + deployment = await DeploymentManager.adeploy( + app_slug="my-app", + variant_slug="new-variant", + environment_slug="staging", + variant_version=None, + ) + + assert deployment.environment_slug == "production" + assert deployment.deployed_by == "abc@example.com" + + +@patch("agenta.DeploymentManager.deploy") +def test_deploy_variant_not_found(mock_deploy): + # Mock the API response for deploying a variant + mock_deploy.return_value = {"detail": "Config not found."} + + deployment = DeploymentManager.deploy( + app_slug="non-existent-app", + variant_slug="new-variant", + environment_slug="staging", + variant_version=None, + ) + + assert deployment["detail"] == "Config not found." # type: ignore + + +@pytest.mark.asyncio +@patch("agenta.DeploymentManager.adeploy") +async def test_adeploy_variant_not_found(mock_adeploy): + # Mock the API response for deploying a variant + mock_adeploy.return_value = {"detail": "Config not found."} + + deployment = await DeploymentManager.adeploy( + app_slug="non-existent-app", + variant_slug="new-variant", + environment_slug="staging", + variant_version=None, + ) + + assert deployment["detail"] == "Config not found." 
# type: ignore diff --git a/services/completion-stateless-sdk/agenta/tests/prompt_sdk/test_variant_manager.py b/services/completion-stateless-sdk/agenta/tests/prompt_sdk/test_variant_manager.py new file mode 100644 index 0000000000..1c633534f3 --- /dev/null +++ b/services/completion-stateless-sdk/agenta/tests/prompt_sdk/test_variant_manager.py @@ -0,0 +1,313 @@ +from unittest.mock import patch + +import pytest + +from agenta.sdk.managers import VariantManager +from agenta.sdk.managers.shared import ConfigurationResponse + + +@patch("agenta.VariantManager.create") +def test_variant_create(mock_create, prompt): + # Mock the API response for creating a variant + mock_create.return_value = ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-variant", + "variant_version": 1, + "params": prompt.model_dump(), + } + ) + + variant = VariantManager.create( + parameters=prompt.model_dump(), + variant_slug="new-variant", + app_slug="my-app", + ) + + assert variant.app_slug == "my-app" + assert variant.variant_slug == "new-variant" + assert variant.variant_version == 1 + + +@pytest.mark.asyncio +@patch("agenta.VariantManager.acreate") +async def test_variant_acreate(mock_acreate, prompt): + # Mock the API response for creating a variant + mock_acreate.return_value = ConfigurationResponse( + **{ + "app_slug": "qa-assistant", + "variant_slug": "school-assistant", + "variant_version": 1, + "params": prompt.model_dump(), + } + ) + + variant = await VariantManager.acreate( + parameters=prompt.model_dump(), + variant_slug="school-assistant", + app_slug="qa-assistant", + ) + + assert variant.app_slug == "qa-assistant" + assert variant.variant_slug == "school-assistant" + assert variant.variant_version == 1 + + +@patch("agenta.VariantManager.commit") +def test_variant_commit(mock_commit, prompt): + # Mock the API response for committing a variant + mock_commit.return_value = ConfigurationResponse( + **{ + "app_slug": "my-new-app", + "variant_slug": 
"new-new-variant", + "variant_version": 2, + "params": prompt.model_dump(), + } + ) + + variant = VariantManager.commit( + parameters=prompt.model_dump(), + variant_slug="new-variant", + app_slug="my-app", + ) + + assert variant.variant_version == 2 + assert type(variant.params) == dict + assert variant.params["temperature"] == 0.6 + + +@pytest.mark.asyncio +@patch("agenta.VariantManager.acommit") +async def test_variant_acommit(mock_acommit, prompt): + # Mock the API response for committing a variant + mock_acommit.return_value = ConfigurationResponse( + **{ + "app_slug": "my-new-app", + "variant_slug": "new-variant", + "variant_version": 4, + "params": {**prompt.model_dump(), "temperature": 1.0}, + } + ) + + variant = await VariantManager.acommit( + parameters=prompt.model_dump(), + variant_slug="new-variant", + app_slug="my-new-app", + ) + + assert variant.variant_version == 4 + assert type(variant.params) == dict + assert variant.params["temperature"] == 1.0 + + +@patch("agenta.VariantManager.delete") +def test_variant_delete(mock_delete): + # Mock the API response for deleting a variant + mock_delete.return_value = 204 + + result = VariantManager.delete( + variant_slug="obsolete-variant", + app_slug="my-app", + ) + + assert result == 204 + + +@pytest.mark.asyncio +@patch("agenta.VariantManager.adelete") +async def test_variant_adelete(mock_adelete): + # Mock the API response for deleting a variant + mock_adelete.return_value = 204 + + result = await VariantManager.adelete( + variant_slug="obsolete-variant-2", + app_slug="my-app", + ) + + assert result == 204 + + +@patch("agenta.VariantManager.list") +def test_variant_list(mock_list, prompt): + # Mock the API response for listing variants + mock_list.return_value = [ + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 0, + "params": {**prompt.model_dump(), "temperature": 0.2}, + } + ), + ConfigurationResponse( + **{ + "app_slug": "my-app", + 
"variant_slug": "new-app-variant", + "variant_version": 1, + "params": {**prompt.model_dump(), "temperature": 0.56}, + } + ), + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 2, + "params": {**prompt.model_dump(), "temperature": 1.0}, + } + ), + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 3, + "params": {**prompt.model_dump(), "temperature": 0.85}, + } + ), + ] + + variants = VariantManager.list(app_slug="my-app") + + assert len(variants) == 4 + assert variants[0].variant_slug == "new-app-variant" + assert variants[1].variant_version == 1 + + +@pytest.mark.asyncio +@patch("agenta.VariantManager.alist") +async def test_variant_alist(mock_alist, prompt): + # Mock the API response for listing variants + mock_alist.return_value = [ + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 0, + "params": {**prompt.model_dump(), "temperature": 0.2}, + } + ), + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 1, + "params": {**prompt.model_dump(), "temperature": 0.56}, + } + ), + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 2, + "params": {**prompt.model_dump(), "temperature": 1.0}, + } + ), + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 3, + "params": {**prompt.model_dump(), "temperature": 0.85}, + } + ), + ] + + variants = await VariantManager.alist(app_slug="my-app") + + assert len(variants) == 4 + assert variants[0].variant_slug == "new-app-variant" + assert variants[1].variant_version == 1 + + +@patch("agenta.VariantManager.history") +def test_variant_history(mock_history, prompt): + # Mock the API response for listing variant history + mock_history.return_value = [ + ConfigurationResponse( + 
**{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 0, + "params": {**prompt.model_dump(), "temperature": 0.2}, + } + ), + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 1, + "params": {**prompt.model_dump(), "temperature": 0.56}, + } + ), + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 2, + "params": {**prompt.model_dump(), "temperature": 1.0}, + } + ), + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 3, + "params": {**prompt.model_dump(), "temperature": 0.85}, + } + ), + ] + + variants = VariantManager.history( + variant_slug="new-app-variant", + app_id="06056815-c9d0-4cdb-bcc7-7c9e6a3fe5e3", + ) + + assert len(variants) == 4 + assert variants[0].variant_slug == "new-app-variant" + assert variants[1].variant_version == 1 + + +@pytest.mark.asyncio +@patch("agenta.VariantManager.ahistory") +async def test_variant_ahistory(mock_ahistory, prompt): + # Mock the API response for listing variants + mock_ahistory.return_value = [ + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 0, + "params": {**prompt.model_dump(), "temperature": 0.2}, + } + ), + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 1, + "params": {**prompt.model_dump(), "temperature": 0.56}, + } + ), + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 2, + "params": {**prompt.model_dump(), "temperature": 1.0}, + } + ), + ConfigurationResponse( + **{ + "app_slug": "my-app", + "variant_slug": "new-app-variant", + "variant_version": 3, + "params": {**prompt.model_dump(), "temperature": 0.85}, + } + ), + ] + + variants = await VariantManager.ahistory( + variant_slug="new-app-variant", 
app_id="06056815-c9d0-4cdb-bcc7-7c9e6a3fe5e3" + ) + assert len(variants) == 4 + assert variants[0].variant_slug == "new-app-variant" + assert variants[1].variant_version == 1 diff --git a/services/completion-stateless-sdk/docker-compose.yml b/services/completion-stateless-sdk/docker-compose.yml new file mode 100644 index 0000000000..a8ff09855b --- /dev/null +++ b/services/completion-stateless-sdk/docker-compose.yml @@ -0,0 +1,22 @@ +services: + completion-stateless-sdk: + build: . + volumes: + - .:/app + environment: + - AGENTA_UNAUTHORIZED_EXECUTION_ALLOWED=True + - AGENTA_HOST=http://host.docker.internal + networks: + - agenta-network + labels: + - "traefik.http.routers.completion-stateless-sdk.rule=PathPrefix(`/completion-stateless-sdk/`)" + - "traefik.http.routers.completion-stateless-sdk.entrypoints=web" + - "traefik.http.middlewares.completion-stateless-sdk-strip.stripprefix.prefixes=/completion-stateless-sdk" + - "traefik.http.middlewares.completion-stateless-sdk-strip.stripprefix.forceslash=true" + - "traefik.http.routers.completion-stateless-sdk.middlewares=completion-stateless-sdk-strip" + - "traefik.http.services.completion-stateless-sdk.loadbalancer.server.port=80" + - "traefik.http.routers.completion-stateless-sdk.service=completion-stateless-sdk" + +networks: + agenta-network: + external: true diff --git a/services/completion-stateless-sdk/entrypoint.sh b/services/completion-stateless-sdk/entrypoint.sh new file mode 100755 index 0000000000..3c6b353144 --- /dev/null +++ b/services/completion-stateless-sdk/entrypoint.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +if [ -f .env ]; then + source .env +fi + +exec python main.py diff --git a/services/completion-stateless-sdk/main.py b/services/completion-stateless-sdk/main.py new file mode 100644 index 0000000000..81d1b6c73d --- /dev/null +++ b/services/completion-stateless-sdk/main.py @@ -0,0 +1,8 @@ +from uvicorn import run +import agenta +import _app # This will register the routes with the FastAPI application 
+import os + + +if __name__ == "__main__": + run("agenta:app", host="0.0.0.0", port=80, reload=True) diff --git a/services/completion-stateless-sdk/mock_litellm.py b/services/completion-stateless-sdk/mock_litellm.py new file mode 100644 index 0000000000..a5b57a68cc --- /dev/null +++ b/services/completion-stateless-sdk/mock_litellm.py @@ -0,0 +1,53 @@ +from typing import Dict, Any, List +from dataclasses import dataclass + + +@dataclass +class MockUsage: + prompt_tokens: int = 10 + completion_tokens: int = 20 + total_tokens: int = 30 + + def dict(self): + return { + "prompt_tokens": self.prompt_tokens, + "completion_tokens": self.completion_tokens, + "total_tokens": self.total_tokens, + } + + +@dataclass +class MockMessage: + content: str = "This is a mock response from the LLM." + + +@dataclass +class MockChoice: + message: MockMessage = MockMessage() + + +@dataclass +class MockCompletion: + choices: List[MockChoice] = None + usage: MockUsage = None + + def __init__(self): + self.choices = [MockChoice()] + self.usage = MockUsage() + + +class MockLiteLLM: + async def acompletion( + self, + model: str, + messages: List[Dict[str, Any]], + temperature: float, + max_tokens: int = None, + **kwargs + ) -> MockCompletion: + return MockCompletion() + + class cost_calculator: + @staticmethod + def completion_cost(completion_response, model): + return 0.0001 # Mock cost diff --git a/services/completion-stateless-sdk/supported_llm_models.py b/services/completion-stateless-sdk/supported_llm_models.py new file mode 100644 index 0000000000..c314be0e37 --- /dev/null +++ b/services/completion-stateless-sdk/supported_llm_models.py @@ -0,0 +1,91 @@ +supported_llm_models = { + "Mistral AI": [ + "mistral/mistral-tiny", + "mistral/mistral-small", + "mistral/mistral-medium", + "mistral/mistral-large-latest", + ], + "Open AI": [ + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo", + "gpt-4", + "gpt-4o", + "gpt-4-1106-preview", + ], + "Gemini": [ + "gemini/gemini-1.5-pro-latest", + ], + "Cohere": [ 
+ "cohere/command-light", + "cohere/command-r-plus", + "cohere/command-nightly", + ], + "Anthropic": [ + "anthropic/claude-2.1", + "anthropic/claude-2", + "anthropic/claude-instant-1.2", + "anthropic/claude-instant-1", + ], + "Anyscale": [ + "anyscale/meta-llama/Llama-2-13b-chat-hf", + "anyscale/meta-llama/Llama-2-70b-chat-hf", + ], + "Perplexity AI": [ + "perplexity/pplx-7b-chat", + "perplexity/pplx-70b-chat", + "perplexity/pplx-7b-online", + "perplexity/pplx-70b-online", + ], + "DeepInfra": [ + "deepinfra/meta-llama/Llama-2-70b-chat-hf", + "deepinfra/meta-llama/Llama-2-13b-chat-hf", + "deepinfra/codellama/CodeLlama-34b-Instruct-hf", + "deepinfra/mistralai/Mistral-7B-Instruct-v0.1", + "deepinfra/jondurbin/airoboros-l2-70b-gpt4-1.4.1", + ], + "Together AI": [ + "together_ai/togethercomputer/llama-2-70b-chat", + "together_ai/togethercomputer/llama-2-70b", + "together_ai/togethercomputer/LLaMA-2-7B-32K", + "together_ai/togethercomputer/Llama-2-7B-32K-Instruct", + "together_ai/togethercomputer/llama-2-7b", + "together_ai/togethercomputer/alpaca-7b", + "together_ai/togethercomputer/CodeLlama-34b-Instruct", + "together_ai/togethercomputer/CodeLlama-34b-Python", + "together_ai/WizardLM/WizardCoder-Python-34B-V1.0", + "together_ai/NousResearch/Nous-Hermes-Llama2-13b", + "together_ai/Austism/chronos-hermes-13b", + ], + "Aleph Alpha": [ + "luminous-base", + "luminous-base-control", + "luminous-extended-control", + "luminous-supreme", + ], + "OpenRouter": [ + "openrouter/openai/gpt-3.5-turbo", + "openrouter/openai/gpt-3.5-turbo-16k", + "openrouter/anthropic/claude-instant-v1", + "openrouter/google/palm-2-chat-bison", + "openrouter/google/palm-2-codechat-bison", + "openrouter/meta-llama/llama-2-13b-chat", + "openrouter/meta-llama/llama-2-70b-chat", + ], + "Groq": [ + "groq/llama3-8b-8192", + "groq/llama3-70b-8192", + "groq/llama2-70b-4096", + "groq/mixtral-8x7b-32768", + "groq/gemma-7b-it", + ], +} + + +def get_all_supported_llm_models(): + """ + Returns the supported LLM models grouped by provider
+ + Returns: + dict: Mapping of provider name to the list of supported model identifiers. + """ + return supported_llm_models diff --git a/services/test/__init__.py b/services/test/__init__.py new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/services/test/__init__.py @@ -0,0 +1 @@ + diff --git a/services/test/conftest.py b/services/test/conftest.py new file mode 100644 index 0000000000..f89e1c3c09 --- /dev/null +++ b/services/test/conftest.py @@ -0,0 +1,24 @@ +import pytest +import httpx +import pytest_asyncio + + +# Configure pytest-asyncio to use strict mode +def pytest_configure(config): + config.option.asyncio_mode = "strict" + + +@pytest.fixture +def chat_url(): + return "http://localhost/chat-live-sdk" # Adjust this if your services run on different ports + + +@pytest.fixture +def completion_url(): + return "http://localhost/completion-live-sdk" + + +@pytest_asyncio.fixture +async def async_client(): + async with httpx.AsyncClient() as client: + yield client diff --git a/services/test/mock_agenta.py b/services/test/mock_agenta.py new file mode 100644 index 0000000000..429135a296 --- /dev/null +++ b/services/test/mock_agenta.py @@ -0,0 +1,24 @@ +"""Mock agenta module for testing""" +from typing import Any, Dict, Type, TypeVar, Optional +from dataclasses import dataclass + +T = TypeVar('T') + +@dataclass +class ConfigManager: + """Mock ConfigManager""" + @staticmethod + def get_from_route(schema: Type[T]) -> T: + return schema() + +def route(path: str = "", config_schema: Optional[Type[Any]] = None): + """Mock route decorator""" + def decorator(func): + return func + return decorator + +def instrument(): + """Mock instrument decorator""" + def decorator(func): + return func + return decorator diff --git a/services/test/mock_litellm.py b/services/test/mock_litellm.py new file mode 100644 index 0000000000..7e8916d10f --- /dev/null +++ b/services/test/mock_litellm.py @@ -0,0 +1,70 @@ +import pytest +from typing import List, Optional, Dict, Any +from dataclasses import 
dataclass + +@dataclass +class Message: + role: str + content: str + tool_calls: Optional[List[Dict[str, Any]]] = None + +@dataclass +class Choice: + message: Message + index: int = 0 + finish_reason: str = "stop" + +@dataclass +class Response: + choices: List[Choice] + model: str = "gpt-4" + id: str = "mock-response-id" + +class MockLiteLLM: + """Mock LiteLLM for testing""" + + async def acompletion(self, **kwargs): + """Mock async completion""" + model = kwargs.get("model", "gpt-4") + messages = kwargs.get("messages", []) + tools = kwargs.get("tools", []) + response_format = kwargs.get("response_format", None) + + # Simulate different response types based on input + if tools: + # Function calling response + tool_calls = [{ + "id": "call_123", + "type": "function", + "function": { + "name": "get_weather", + "arguments": '{"location": "London", "unit": "celsius"}' + } + }] + message = Message( + role="assistant", + content=None, + tool_calls=tool_calls + ) + elif response_format and response_format["type"] == "json_object": + # JSON response + message = Message( + role="assistant", + content='{"colors": ["red", "blue", "green"]}' + ) + else: + # Regular text response + message = Message( + role="assistant", + content="This is a mock response" + ) + + return Response( + choices=[Choice(message=message)], + model=model + ) + +@pytest.fixture +def mock_litellm(): + """Fixture to provide mock LiteLLM instance""" + return MockLiteLLM() diff --git a/services/test/requirements.txt b/services/test/requirements.txt new file mode 100644 index 0000000000..9bd3895bcd --- /dev/null +++ b/services/test/requirements.txt @@ -0,0 +1,3 @@ +pytest==7.4.3 +pytest-asyncio==0.21.1 +httpx==0.25.2 diff --git a/services/test/test_new_chat_service.py b/services/test/test_new_chat_service.py new file mode 100644 index 0000000000..555f80f0e6 --- /dev/null +++ b/services/test/test_new_chat_service.py @@ -0,0 +1,76 @@ +import pytest +import pytest_asyncio +from typing import Dict, Any + 
+pytestmark = pytest.mark.asyncio + + +@pytest.mark.asyncio +async def test_generate(async_client, chat_url): + payload = { + "inputs": [ + { + "role": "user", + "content": "What are some innovative tech solutions for a startup?", + } + ] + } + response = await async_client.post(f"{chat_url}/generate", json=payload) + assert response.status_code == 200 + data = response.json() + + # Check response structure + assert "version" in data + assert "data" in data + assert "tree" in data + + # Check tree structure + tree = data["tree"] + assert "nodes" in tree + assert len(tree["nodes"]) > 0 + + # Check first node + node = tree["nodes"][0] + assert "lifecycle" in node + assert "data" in node + assert "metrics" in node + assert "meta" in node + + # Check configuration + config = node["meta"]["configuration"] + assert config["model"] == "gpt-3.5-turbo" + assert "temperature" in config + assert "prompt_system" in config + + +@pytest.mark.asyncio +async def test_run(async_client, chat_url): + payload = { + "inputs": [ + { + "role": "user", + "content": "What are the best practices for startup growth?", + } + ] + } + response = await async_client.post(f"{chat_url}/run", json=payload) + assert response.status_code == 200 + data = response.json() + + assert "version" in data + assert "data" in data + assert isinstance(data["data"], str) + + +@pytest.mark.asyncio +async def test_generate_deployed(async_client, chat_url): + payload = { + "inputs": [{"role": "user", "content": "How to build a successful tech team?"}] + } + response = await async_client.post(f"{chat_url}/generate_deployed", json=payload) + assert response.status_code == 200 + data = response.json() + + assert "version" in data + assert "data" in data + assert isinstance(data["data"], str) diff --git a/services/test/test_old_chat_service.py b/services/test/test_old_chat_service.py new file mode 100644 index 0000000000..555f80f0e6 --- /dev/null +++ b/services/test/test_old_chat_service.py @@ -0,0 +1,76 @@ +import 
pytest +import pytest_asyncio +from typing import Dict, Any + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.asyncio +async def test_generate(async_client, chat_url): + payload = { + "inputs": [ + { + "role": "user", + "content": "What are some innovative tech solutions for a startup?", + } + ] + } + response = await async_client.post(f"{chat_url}/generate", json=payload) + assert response.status_code == 200 + data = response.json() + + # Check response structure + assert "version" in data + assert "data" in data + assert "tree" in data + + # Check tree structure + tree = data["tree"] + assert "nodes" in tree + assert len(tree["nodes"]) > 0 + + # Check first node + node = tree["nodes"][0] + assert "lifecycle" in node + assert "data" in node + assert "metrics" in node + assert "meta" in node + + # Check configuration + config = node["meta"]["configuration"] + assert config["model"] == "gpt-3.5-turbo" + assert "temperature" in config + assert "prompt_system" in config + + +@pytest.mark.asyncio +async def test_run(async_client, chat_url): + payload = { + "inputs": [ + { + "role": "user", + "content": "What are the best practices for startup growth?", + } + ] + } + response = await async_client.post(f"{chat_url}/run", json=payload) + assert response.status_code == 200 + data = response.json() + + assert "version" in data + assert "data" in data + assert isinstance(data["data"], str) + + +@pytest.mark.asyncio +async def test_generate_deployed(async_client, chat_url): + payload = { + "inputs": [{"role": "user", "content": "How to build a successful tech team?"}] + } + response = await async_client.post(f"{chat_url}/generate_deployed", json=payload) + assert response.status_code == 200 + data = response.json() + + assert "version" in data + assert "data" in data + assert isinstance(data["data"], str) diff --git a/services/test/test_old_completion_service.py b/services/test/test_old_completion_service.py new file mode 100644 index 0000000000..33c1d46dfd --- /dev/null 
+++ b/services/test/test_old_completion_service.py @@ -0,0 +1,78 @@ +import pytest +import pytest_asyncio +from typing import Dict, Any + +pytestmark = pytest.mark.asyncio + + +async def test_health(async_client, completion_url): + response = await async_client.get(f"{completion_url}/health") + assert response.status_code == 200 + data = response.json() + assert data == {"status": "ok"} + + +async def test_generate(async_client, completion_url): + payload = {"inputs": {"country": "France"}} + response = await async_client.post(f"{completion_url}/generate", json=payload) + assert response.status_code == 200 + data = response.json() + + # Check response structure + assert "version" in data + assert "data" in data + assert "tree" in data + + # Check tree structure + tree = data["tree"] + assert "nodes" in tree + assert len(tree["nodes"]) > 0 + + # Check first node + node = tree["nodes"][0] + assert "lifecycle" in node + assert "data" in node + assert "metrics" in node + assert "meta" in node + + # Check configuration + config = node["meta"]["configuration"] + assert config["model"] == "gpt-3.5-turbo" + assert "temperature" in config + assert "prompt_system" in config + assert "prompt_user" in config + + +async def test_playground_run(async_client, completion_url): + payload = {"inputs": {"country": "Spain"}} + response = await async_client.post(f"{completion_url}/playground/run", json=payload) + assert response.status_code == 200 + data = response.json() + + assert "version" in data + assert "data" in data + assert isinstance(data["data"], str) + + +async def test_generate_deployed(async_client, completion_url): + payload = {"inputs": {"country": "Germany"}} + response = await async_client.post( + f"{completion_url}/generate_deployed", json=payload + ) + assert response.status_code == 200 + data = response.json() + + assert "version" in data + assert "data" in data + assert isinstance(data["data"], str) + + +async def test_run(async_client, completion_url): + payload 
= {"inputs": {"country": "Italy"}} + response = await async_client.post(f"{completion_url}/run", json=payload) + assert response.status_code == 200 + data = response.json() + + assert "version" in data + assert "data" in data + assert isinstance(data["data"], str) diff --git a/services/test/test_prompt_template.py b/services/test/test_prompt_template.py new file mode 100644 index 0000000000..08f80b729b --- /dev/null +++ b/services/test/test_prompt_template.py @@ -0,0 +1,251 @@ +import pytest +import sys +import os +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from typing import Dict, List +from pydantic import ValidationError + +sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "completion-new-sdk-prompt")) +from _app import ( + PromptTemplate, + ModelConfig, + Message, + InputValidationError, + TemplateFormatError, + ResponseFormat +) +from .mock_litellm import MockLiteLLM + +# Test Data +BASIC_MESSAGES = [ + Message(role="system", content="You are a {type} assistant"), + Message(role="user", content="Help me with {task}") +] + +TOOL_MESSAGES = [ + Message(role="system", content="You are a function calling assistant"), + Message(role="user", content="Get the weather for {location}") +] + +WEATHER_TOOL = { + "type": "function", + "function": { + "name": "get_weather", + "description": "Get the current weather", + "parameters": { + "type": "object", + "properties": { + "location": {"type": "string"}, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]} + }, + "required": ["location"] + } + } +} + +class TestPromptTemplateBasics: + """Test basic functionality of PromptTemplate""" + + def test_create_template(self): + """Test creating a basic template""" + template = PromptTemplate(messages=BASIC_MESSAGES) + assert len(template.messages) == 2 + assert template.messages[0].role == "system" + assert template.messages[1].role == "user" + + def 
test_create_template_with_model_config(self): + """Test creating template with custom model config""" + model_config = ModelConfig( + model="gpt-4", + temperature=0.7, + max_tokens=100 + ) + template = PromptTemplate( + messages=BASIC_MESSAGES, + model_config=model_config + ) + assert template.model_config.model == "gpt-4" + assert template.model_config.temperature == 0.7 + assert template.model_config.max_tokens == 100 + + def test_invalid_model_config(self): + """Test validation errors for invalid model config""" + with pytest.raises(ValidationError): + ModelConfig(temperature=3.0) # temperature > 2.0 + + with pytest.raises(ValidationError): + ModelConfig(max_tokens=-2) # max_tokens < -1 + +class TestPromptFormatting: + """Test template formatting functionality""" + + def test_basic_format(self): + """Test basic formatting with valid inputs""" + template = PromptTemplate(messages=BASIC_MESSAGES) + formatted = template.format(type="coding", task="Python") + assert formatted.messages[0].content == "You are a coding assistant" + assert formatted.messages[1].content == "Help me with Python" + + def test_format_with_validation(self): + """Test formatting with input validation""" + template = PromptTemplate( + messages=BASIC_MESSAGES, + input_keys=["type", "task"] + ) + # Valid inputs + formatted = template.format(type="coding", task="Python") + assert formatted.messages[0].content == "You are a coding assistant" + + # Missing input + with pytest.raises(InputValidationError) as exc: + template.format(type="coding") + assert "Missing required inputs: task" in str(exc.value) + + # Extra input + with pytest.raises(InputValidationError) as exc: + template.format(type="coding", task="Python", extra="value") + assert "Unexpected inputs: extra" in str(exc.value) + + @pytest.mark.parametrize("template_format,template_string,inputs,expected", [ + ("fstring", "Hello {name}", {"name": "World"}, "Hello World"), + ("jinja2", "Hello {{ name }}", {"name": "World"}, "Hello World"), + 
("curly", "Hello {{name}}", {"name": "World"}, "Hello World"), + ]) + def test_format_types(self, template_format, template_string, inputs, expected): + """Test different format types""" + template = PromptTemplate( + messages=[Message(role="user", content=template_string)], + template_format=template_format + ) + formatted = template.format(**inputs) + assert formatted.messages[0].content == expected + + def test_format_errors(self): + """Test formatting error cases""" + template = PromptTemplate(messages=BASIC_MESSAGES) + + # Missing variable + with pytest.raises(TemplateFormatError) as exc: + template.format(type="coding") # missing 'task' + assert "Missing required variable" in str(exc.value) + + # Invalid template + bad_template = PromptTemplate( + messages=[Message(role="user", content="Hello {")] + ) + with pytest.raises(TemplateFormatError): + bad_template.format(name="World") + +class TestOpenAIIntegration: + """Test OpenAI/LiteLLM integration features""" + + def test_basic_openai_kwargs(self): + """Test basic OpenAI kwargs generation""" + template = PromptTemplate( + messages=BASIC_MESSAGES, + model_config=ModelConfig( + model="gpt-4", + temperature=0.7, + max_tokens=100 + ) + ) + kwargs = template.to_openai_kwargs() + assert kwargs["model"] == "gpt-4" + assert kwargs["temperature"] == 0.7 + assert kwargs["max_tokens"] == 100 + assert len(kwargs["messages"]) == 2 + + def test_tools_openai_kwargs(self): + """Test OpenAI kwargs with tools""" + template = PromptTemplate( + messages=TOOL_MESSAGES, + model_config=ModelConfig( + model="gpt-4", + tools=[WEATHER_TOOL], + tool_choice="auto" + ) + ) + kwargs = template.to_openai_kwargs() + assert len(kwargs["tools"]) == 1 + assert kwargs["tools"][0]["type"] == "function" + assert kwargs["tool_choice"] == "auto" + + def test_json_mode_openai_kwargs(self): + """Test OpenAI kwargs with JSON mode""" + template = PromptTemplate( + messages=BASIC_MESSAGES, + model_config=ModelConfig( + model="gpt-4", + 
response_format=ResponseFormat(type="json_object") + ) + ) + kwargs = template.to_openai_kwargs() + assert kwargs["response_format"]["type"] == "json_object" + + def test_optional_params_openai_kwargs(self): + """Test that optional params are only included when non-default""" + template = PromptTemplate( + messages=BASIC_MESSAGES, + model_config=ModelConfig( + model="gpt-4", + frequency_penalty=0.0, # default value + presence_penalty=0.5 # non-default value + ) + ) + kwargs = template.to_openai_kwargs() + assert "frequency_penalty" not in kwargs + assert kwargs["presence_penalty"] == 0.5 + +class TestEndToEndScenarios: + """Test end-to-end scenarios""" + + @pytest.mark.asyncio + async def test_chat_completion(self, mock_litellm): + """Test chat completion with basic prompt""" + template = PromptTemplate( + messages=[ + Message(role="user", content="Say hello to {name}") + ], + model_config=ModelConfig(model="gpt-3.5-turbo") + ) + formatted = template.format(name="World") + kwargs = formatted.to_openai_kwargs() + + response = await mock_litellm.acompletion(**kwargs) + assert response.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_function_calling(self, mock_litellm): + """Test function calling scenario""" + template = PromptTemplate( + messages=TOOL_MESSAGES, + model_config=ModelConfig( + model="gpt-4", + tools=[WEATHER_TOOL], + tool_choice="auto" + ) + ) + formatted = template.format(location="London") + kwargs = formatted.to_openai_kwargs() + + response = await mock_litellm.acompletion(**kwargs) + assert response.choices[0].message.tool_calls is not None + + @pytest.mark.asyncio + async def test_json_mode(self, mock_litellm): + """Test JSON mode response""" + template = PromptTemplate( + messages=[ + Message(role="user", content="List 3 colors in JSON") + ], + model_config=ModelConfig( + model="gpt-4", + response_format=ResponseFormat(type="json_object") + ) + ) + kwargs = template.to_openai_kwargs() + + response = await 
mock_litellm.acompletion(**kwargs) + assert response.choices[0].message.content.startswith("{") + assert response.choices[0].message.content.endswith("}")