From ac11388f79059af9ed6663fc631ba40d4c1ed1c3 Mon Sep 17 00:00:00 2001
From: Joel Alexander
Date: Wed, 24 Jul 2024 17:35:19 -0400
Subject: [PATCH 1/3] feat(history): add history to completion

---
 .../fetching_and_using_parea_deployments.py   |  6 +-
 .../tracing_with_deployed_prompt.py           | 48 +++++++-------
 .../tracing_without_deployed_prompt.py        | 62 +++++++++----------
 parea/schemas/log.py                          |  1 +
 4 files changed, 61 insertions(+), 56 deletions(-)

diff --git a/cookbook/parea_llm_proxy/deployments/fetching_and_using_parea_deployments.py b/cookbook/parea_llm_proxy/deployments/fetching_and_using_parea_deployments.py
index a69e169b..9b907cea 100644
--- a/cookbook/parea_llm_proxy/deployments/fetching_and_using_parea_deployments.py
+++ b/cookbook/parea_llm_proxy/deployments/fetching_and_using_parea_deployments.py
@@ -2,7 +2,7 @@
 
 from dotenv import load_dotenv
 
-from parea import Parea
+from parea import Parea, trace
 from parea.schemas.models import Completion, CompletionResponse, UseDeployedPrompt, UseDeployedPromptResponse
 
 load_dotenv()
@@ -10,6 +10,7 @@
 p = Parea(api_key=os.getenv("PAREA_API_KEY"))
 
 
+@trace
 def main() -> CompletionResponse:
     return p.completion(Completion(deployment_id="p-4cbYJ0LIy0gaWb6Z819k7", llm_inputs={"x": "python", "y": "fastapi"}))
 
@@ -19,7 +20,8 @@ def get_critic_prompt(val: str) -> UseDeployedPromptResponse:
 
 
 if __name__ == "__main__":
-    print(get_critic_prompt("Python"))
+    print(main())
+    # print(get_critic_prompt("Python"))
     # a = UseDeployedPromptResponse(
     #     deployment_id="p-87NFVeQg30Hk2Hatw1h72",
     #     name="deploy-test",
diff --git a/cookbook/parea_llm_proxy/deployments/tracing_with_deployed_prompt.py b/cookbook/parea_llm_proxy/deployments/tracing_with_deployed_prompt.py
index ca61924e..9156765a 100644
--- a/cookbook/parea_llm_proxy/deployments/tracing_with_deployed_prompt.py
+++ b/cookbook/parea_llm_proxy/deployments/tracing_with_deployed_prompt.py
@@ -8,7 +8,7 @@
 from dotenv import load_dotenv
 
 from parea import Parea, get_current_trace_id, trace
-from parea.schemas import Completion, CompletionResponse, FeedbackRequest
+from parea.schemas import Completion, CompletionResponse, LLMInputs, Message, Role
 
 load_dotenv()
 
@@ -18,12 +18,13 @@ def deployed_argument_generator(query: str, additional_description: str = "") -> str:
     return p.completion(
         Completion(
-            deployment_id="p-RG8d9rfJc_0cctwfpb_n6",
+            deployment_id="p-XOh3kp8B0nIE82WgioPnr",
             llm_inputs={
                 "additional_description": additional_description,
                 "date": f"{datetime.now()}",
                 "query": query,
             },
+            llm_configuration=LLMInputs(history=[Message(role=Role.user, content="Some history")]),
         )
     ).content
 
@@ -31,8 +32,9 @@ def deployed_critic(argument: str) -> str:
     return p.completion(
         Completion(
-            deployment_id="p-fXgZytT3dJjXD_71TDR4s",
+            deployment_id="p-PSOwRyIPaQRq4xQW3MbpV",
             llm_inputs={"argument": argument},
+            llm_configuration=LLMInputs(history=[Message(role=Role.user, content="Some history")]),
         )
     ).content
 
@@ -40,7 +42,7 @@ def deployed_refiner(query: str, additional_description: str, current_arg: str, criticism: str) -> str:
     return p.completion(
         Completion(
-            deployment_id="p--G2s9okMTvBEh3d8YqLY2",
+            deployment_id="p-bJ3-UKh9-ixapZafaRBsj",
             llm_inputs={
                 "additional_description": additional_description,
                 "date": f"{datetime.now()}",
                 "query": query,
                 "argument": current_arg,
                 "criticism": criticism,
             },
+            llm_configuration=LLMInputs(history=[Message(role=Role.user, content="Some history")]),
         )
     ).content
 
 
@@ -55,7 +58,7 @@ def deployed_refiner2(query: str, additional_description: str, current_arg: str, criticism: str) -> CompletionResponse:
     return p.completion(
         Completion(
-            deployment_id="p--G2s9okMTvBEh3d8YqLY2",
+            deployment_id="p-bJ3-UKh9-ixapZafaRBsj",
             llm_inputs={
                 "additional_description": additional_description,
                 "date": f"{datetime.now()}",
                 "query": query,
@@ -63,15 +66,16 @@ def deployed_refiner2(query: str, additional_description: str, current_arg: str,
                 "argument": current_arg,
                 "criticism": criticism,
             },
+            llm_configuration=LLMInputs(history=[Message(role=Role.user, content="Some history")]),
         )
     )
 
 
-# @trace
-# def deployed_argument_chain(query: str, additional_description: str = "") -> str:
-#     argument = deployed_argument_generator(query, additional_description)
-#     criticism = deployed_critic(argument)
-#     return deployed_refiner(query, additional_description, argument, criticism)
+@trace
+def deployed_argument_chain(query: str, additional_description: str = "") -> str:
+    argument = deployed_argument_generator(query, additional_description)
+    criticism = deployed_critic(argument)
+    return deployed_refiner(query, additional_description, argument, criticism)
 
 
 @trace(
@@ -86,21 +90,21 @@ def deployed_argument_chain_tags_metadata(query: str, additional_description: st
 if __name__ == "__main__":
-    # result1 = deployed_argument_chain(
-    #     "Whether coffee is good for you.",
-    #     additional_description="Provide a concise, few sentence argument on why coffee is good for you.",
-    # )
-    # print(result1)
+    result1 = deployed_argument_chain(
+        "Whether coffee is good for you.",
+        additional_description="Provide a concise, few sentence argument on why coffee is good for you.",
+    )
+    print(result1)
 
     result2, trace_id = deployed_argument_chain_tags_metadata(
         "Whether coffee is good for you.",
         additional_description="Provide a concise, few sentence argument on why coffee is good for you.",
     )
     print(json.dumps(asdict(result2), indent=2))
-    p.record_feedback(
-        FeedbackRequest(
-            trace_id=trace_id,
-            score=0.7,  # 0.0 (bad) to 1.0 (good)
-            target="Coffee is wonderful. End of story.",
-        )
-    )
+    # p.record_feedback(
+    #     FeedbackRequest(
+    #         trace_id=trace_id,
+    #         score=0.7,  # 0.0 (bad) to 1.0 (good)
+    #         target="Coffee is wonderful. End of story.",
+    #     )
+    # )
diff --git a/cookbook/parea_llm_proxy/tracing_without_deployed_prompt.py b/cookbook/parea_llm_proxy/tracing_without_deployed_prompt.py
index 9191cfcc..dbed5498 100644
--- a/cookbook/parea_llm_proxy/tracing_without_deployed_prompt.py
+++ b/cookbook/parea_llm_proxy/tracing_without_deployed_prompt.py
@@ -6,7 +6,7 @@
 from dotenv import load_dotenv
 
 from parea import Parea, get_current_trace_id, trace
-from parea.schemas import Completion, CompletionResponse, FeedbackRequest, LLMInputs, Message, ModelParams
+from parea.schemas import Completion, CompletionResponse, LLMInputs, Message, ModelParams, Role
 
 load_dotenv()
 
@@ -21,9 +21,7 @@ def call_llm(
     return p.completion(
         data=Completion(
             llm_configuration=LLMInputs(
-                model=model,
-                model_params=ModelParams(temp=temperature),
-                messages=[Message(**d) for d in data],
+                model=model, model_params=ModelParams(temp=temperature), messages=[Message(**d) for d in data], history=[Message(role=Role.user, content="Some history")]
             )
         )
     )
@@ -137,31 +135,31 @@ def json_call():
         additional_description="Provide a concise, few sentence argument on why coffee is good for you.",
     )
     print(result1)
-
-    result2, trace_id2 = argument_chain2(
-        "Whether wine is good for you.",
-        additional_description="Provide a concise, few sentence argument on why wine is good for you.",
-    )
-    print(trace_id2, result2)
-    p.record_feedback(
-        FeedbackRequest(
-            trace_id=trace_id2,
-            score=0.7,  # 0.0 (bad) to 1.0 (good)
-            target="Wine is wonderful.",
-        )
-    )
-
-    result3 = argument_chain3(
-        "Whether moonshine is good for you.",
-        additional_description="Provide a concise, few sentence argument on why moonshine is good for you.",
-    )
-    print(result3.content)
-    p.record_feedback(
-        FeedbackRequest(
-            trace_id=result3.inference_id,
-            score=0.5,  # 0.0 (bad) to 1.0 (good)
-            target="Moonshine is wonderful. End of story.",
-        )
-    )
-
-    print(json_call())
+    #
+    # result2, trace_id2 = argument_chain2(
+    #     "Whether wine is good for you.",
+    #     additional_description="Provide a concise, few sentence argument on why wine is good for you.",
+    # )
+    # print(trace_id2, result2)
+    # p.record_feedback(
+    #     FeedbackRequest(
+    #         trace_id=trace_id2,
+    #         score=0.7,  # 0.0 (bad) to 1.0 (good)
+    #         target="Wine is wonderful.",
+    #     )
+    # )
+    #
+    # result3 = argument_chain3(
+    #     "Whether moonshine is good for you.",
+    #     additional_description="Provide a concise, few sentence argument on why moonshine is good for you.",
+    # )
+    # print(result3.content)
+    # p.record_feedback(
+    #     FeedbackRequest(
+    #         trace_id=result3.inference_id,
+    #         score=0.5,  # 0.0 (bad) to 1.0 (good)
+    #         target="Moonshine is wonderful. End of story.",
+    #     )
+    # )
+    #
+    # print(json_call())
diff --git a/parea/schemas/log.py b/parea/schemas/log.py
index 34e19aaf..6a4a7050 100644
--- a/parea/schemas/log.py
+++ b/parea/schemas/log.py
@@ -50,6 +50,7 @@ class LLMInputs:
     provider: Optional[str] = None
     model_params: Optional[ModelParams] = ModelParams()
     messages: Optional[List[Message]] = None
+    history: Optional[List[Message]] = None
     functions: Optional[List[Any]] = None
     function_call: Optional[Union[str, Dict[str, str]]] = None
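
Taken together, patch 1 lets a caller attach prior conversation turns to a completion through the new LLMInputs.history field, for deployed and code-defined prompts alike. A minimal sketch of the deployed-prompt call pattern, assembled from the calls shown above (the deployment ID below is a placeholder, not one of the IDs from this patch):

    import os

    from dotenv import load_dotenv

    from parea import Parea
    from parea.schemas import Completion, LLMInputs, Message, Role

    load_dotenv()

    p = Parea(api_key=os.getenv("PAREA_API_KEY"))

    # Template variables still travel in `llm_inputs`; prior turns ride
    # alongside them in the new `history` field.
    response = p.completion(
        Completion(
            deployment_id="p-XXXXXXXXXXXXXXXXXXXXX",  # placeholder deployment ID
            llm_inputs={"query": "Whether coffee is good for you."},
            llm_configuration=LLMInputs(history=[Message(role=Role.user, content="Some history")]),
        )
    )
    print(response.content)
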
From bc323f18922e57bde37f220b6f79da9d06dd94db Mon Sep 17 00:00:00 2001
From: Joel Alexander
Date: Wed, 24 Jul 2024 17:36:00 -0400
Subject: [PATCH 2/3] feat(history): add history to completion

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index b1a260ef..ae48a881 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,7 +6,7 @@ build-backend = "poetry.core.masonry.api"
 [tool.poetry]
 name = "parea-ai"
 packages = [{ include = "parea" }]
-version = "0.2.188"
+version = "0.2.189"
 description = "Parea python sdk"
 readme = "README.md"
 authors = ["joel-parea-ai "]

From 9c69612080f8f52b7156c87bd711ad699d78af79 Mon Sep 17 00:00:00 2001
From: Joel Alexander
Date: Wed, 24 Jul 2024 17:44:22 -0400
Subject: [PATCH 3/3] feat(history): add history to completion

---
 .../fetching_and_using_parea_deployments.py |  6 +-
 .../tracing_without_deployed_prompt.py      | 58 +++++++++----------
 2 files changed, 31 insertions(+), 33 deletions(-)

diff --git a/cookbook/parea_llm_proxy/deployments/fetching_and_using_parea_deployments.py b/cookbook/parea_llm_proxy/deployments/fetching_and_using_parea_deployments.py
index 9b907cea..a69e169b 100644
--- a/cookbook/parea_llm_proxy/deployments/fetching_and_using_parea_deployments.py
+++ b/cookbook/parea_llm_proxy/deployments/fetching_and_using_parea_deployments.py
@@ -2,7 +2,7 @@
 
 from dotenv import load_dotenv
 
-from parea import Parea, trace
+from parea import Parea
 from parea.schemas.models import Completion, CompletionResponse, UseDeployedPrompt, UseDeployedPromptResponse
 
 load_dotenv()
@@ -10,7 +10,6 @@
 p = Parea(api_key=os.getenv("PAREA_API_KEY"))
 
 
-@trace
 def main() -> CompletionResponse:
     return p.completion(Completion(deployment_id="p-4cbYJ0LIy0gaWb6Z819k7", llm_inputs={"x": "python", "y": "fastapi"}))
 
@@ -20,8 +19,7 @@ def get_critic_prompt(val: str) -> UseDeployedPromptResponse:
 
 
 if __name__ == "__main__":
-    print(main())
-    # print(get_critic_prompt("Python"))
+    print(get_critic_prompt("Python"))
     # a = UseDeployedPromptResponse(
     #     deployment_id="p-87NFVeQg30Hk2Hatw1h72",
     #     name="deploy-test",
diff --git a/cookbook/parea_llm_proxy/tracing_without_deployed_prompt.py b/cookbook/parea_llm_proxy/tracing_without_deployed_prompt.py
index dbed5498..1673bffc 100644
--- a/cookbook/parea_llm_proxy/tracing_without_deployed_prompt.py
+++ b/cookbook/parea_llm_proxy/tracing_without_deployed_prompt.py
@@ -6,7 +6,7 @@
 from dotenv import load_dotenv
 
 from parea import Parea, get_current_trace_id, trace
-from parea.schemas import Completion, CompletionResponse, LLMInputs, Message, ModelParams, Role
+from parea.schemas import Completion, CompletionResponse, FeedbackRequest, LLMInputs, Message, ModelParams, Role
 
 load_dotenv()
 
@@ -135,31 +135,31 @@ def json_call():
         additional_description="Provide a concise, few sentence argument on why coffee is good for you.",
     )
     print(result1)
-    #
-    # result2, trace_id2 = argument_chain2(
-    #     "Whether wine is good for you.",
-    #     additional_description="Provide a concise, few sentence argument on why wine is good for you.",
-    # )
-    # print(trace_id2, result2)
-    # p.record_feedback(
-    #     FeedbackRequest(
-    #         trace_id=trace_id2,
-    #         score=0.7,  # 0.0 (bad) to 1.0 (good)
-    #         target="Wine is wonderful.",
-    #     )
-    # )
-    #
-    # result3 = argument_chain3(
-    #     "Whether moonshine is good for you.",
-    #     additional_description="Provide a concise, few sentence argument on why moonshine is good for you.",
-    # )
-    # print(result3.content)
-    # p.record_feedback(
-    #     FeedbackRequest(
-    #         trace_id=result3.inference_id,
-    #         score=0.5,  # 0.0 (bad) to 1.0 (good)
-    #         target="Moonshine is wonderful. End of story.",
-    #     )
-    # )
-    #
-    # print(json_call())
+
+    result2, trace_id2 = argument_chain2(
+        "Whether wine is good for you.",
+        additional_description="Provide a concise, few sentence argument on why wine is good for you.",
+    )
+    print(trace_id2, result2)
+    p.record_feedback(
+        FeedbackRequest(
+            trace_id=trace_id2,
+            score=0.7,  # 0.0 (bad) to 1.0 (good)
+            target="Wine is wonderful.",
+        )
+    )
+
+    result3 = argument_chain3(
+        "Whether moonshine is good for you.",
+        additional_description="Provide a concise, few sentence argument on why moonshine is good for you.",
+    )
+    print(result3.content)
+    p.record_feedback(
+        FeedbackRequest(
+            trace_id=result3.inference_id,
+            score=0.5,  # 0.0 (bad) to 1.0 (good)
+            target="Moonshine is wonderful. End of story.",
+        )
+    )
+
+    print(json_call())
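
For prompts defined in code rather than deployed on Parea, the same field sits on the full LLMInputs configuration next to model, model_params, and messages, as in patch 1's change to call_llm. A sketch of a multi-turn call; the model name, temperature, and the assistant turn are illustrative assumptions, not values taken from these patches:

    import os

    from dotenv import load_dotenv

    from parea import Parea
    from parea.schemas import Completion, LLMInputs, Message, ModelParams, Role

    load_dotenv()

    p = Parea(api_key=os.getenv("PAREA_API_KEY"))

    # Hypothetical earlier exchange; `history` carries it separately from
    # the current prompt in `messages`.
    history = [
        Message(role=Role.user, content="Give me an argument for coffee."),
        Message(role=Role.assistant, content="Coffee sharpens focus and is rich in antioxidants."),
    ]

    response = p.completion(
        data=Completion(
            llm_configuration=LLMInputs(
                model="gpt-4o",  # illustrative model name
                model_params=ModelParams(temp=0.0),  # illustrative temperature
                messages=[Message(role=Role.user, content="Now compress that into one sentence.")],
                history=history,
            )
        )
    )
    print(response.content)

Keeping history separate from messages presumably lets the proxy log and render prior turns distinctly from the prompt under test, rather than requiring callers to splice everything into one message list.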