Skip to content

Commit

Permalink
Merge pull request #1010 from parea-ai/PAI-1402-add-history-via-p-completion
Browse files Browse the repository at this point in the history

feat(history): add history to completion
  • Loading branch information
jalexanderII authored Jul 24, 2024
2 parents 1eeec4b + 9c69612 commit 577bd3c
Show file tree
Hide file tree
Showing 4 changed files with 30 additions and 27 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
from dotenv import load_dotenv

from parea import Parea, get_current_trace_id, trace
from parea.schemas import Completion, CompletionResponse, FeedbackRequest
from parea.schemas import Completion, CompletionResponse, LLMInputs, Message, Role

load_dotenv()

Expand All @@ -18,60 +18,64 @@
def deployed_argument_generator(query: str, additional_description: str = "") -> str:
    """Generate an argument for *query* using a deployed Parea prompt.

    Args:
        query: The topic to argue about.
        additional_description: Optional extra guidance injected into the prompt.

    Returns:
        The completion text produced by the deployed prompt.
    """
    return p.completion(
        Completion(
            # Post-merge deployment id (the stale pre-diff id made this a
            # duplicate keyword argument, i.e. a SyntaxError).
            deployment_id="p-XOh3kp8B0nIE82WgioPnr",
            llm_inputs={
                "additional_description": additional_description,
                "date": f"{datetime.now()}",
                "query": query,
            },
            # Demonstrates the new `history` field added to LLMInputs.
            llm_configuration=LLMInputs(history=[Message(role=Role.user, content="Some history")]),
        )
    ).content


def deployed_critic(argument: str) -> str:
    """Critique *argument* using a deployed Parea prompt.

    Args:
        argument: The argument text to critique.

    Returns:
        The critique text produced by the deployed prompt.
    """
    return p.completion(
        Completion(
            # Post-merge deployment id (the stale pre-diff id made this a
            # duplicate keyword argument, i.e. a SyntaxError).
            deployment_id="p-PSOwRyIPaQRq4xQW3MbpV",
            llm_inputs={"argument": argument},
            # Demonstrates the new `history` field added to LLMInputs.
            llm_configuration=LLMInputs(history=[Message(role=Role.user, content="Some history")]),
        )
    ).content


def deployed_refiner(query: str, additional_description: str, current_arg: str, criticism: str) -> str:
    """Refine *current_arg* in light of *criticism* using a deployed Parea prompt.

    Args:
        query: The original topic.
        additional_description: Extra guidance injected into the prompt.
        current_arg: The argument produced so far.
        criticism: The critique to address.

    Returns:
        The refined argument text.
    """
    return p.completion(
        Completion(
            # Post-merge deployment id (the stale pre-diff id made this a
            # duplicate keyword argument, i.e. a SyntaxError).
            deployment_id="p-bJ3-UKh9-ixapZafaRBsj",
            llm_inputs={
                "additional_description": additional_description,
                "date": f"{datetime.now()}",
                "query": query,
                "argument": current_arg,
                "criticism": criticism,
            },
            # Demonstrates the new `history` field added to LLMInputs.
            llm_configuration=LLMInputs(history=[Message(role=Role.user, content="Some history")]),
        )
    ).content


def deployed_refiner2(query: str, additional_description: str, current_arg: str, criticism: str) -> CompletionResponse:
    """Refine *current_arg* like ``deployed_refiner`` but return the full response.

    Identical prompt/deployment to ``deployed_refiner``; returns the whole
    CompletionResponse (not just ``.content``) so callers can inspect metadata.

    Args:
        query: The original topic.
        additional_description: Extra guidance injected into the prompt.
        current_arg: The argument produced so far.
        criticism: The critique to address.

    Returns:
        The full CompletionResponse object.
    """
    return p.completion(
        Completion(
            # Post-merge deployment id (the stale pre-diff id made this a
            # duplicate keyword argument, i.e. a SyntaxError).
            deployment_id="p-bJ3-UKh9-ixapZafaRBsj",
            llm_inputs={
                "additional_description": additional_description,
                "date": f"{datetime.now()}",
                "query": query,
                "argument": current_arg,
                "criticism": criticism,
            },
            # Demonstrates the new `history` field added to LLMInputs.
            llm_configuration=LLMInputs(history=[Message(role=Role.user, content="Some history")]),
        )
    )


@trace
def deployed_argument_chain(query: str, additional_description: str = "") -> str:
    """Run the three-step argument pipeline: generate -> critique -> refine.

    The @trace decorator records all three nested completions under a single
    Parea trace.

    Args:
        query: The topic to argue about.
        additional_description: Optional extra guidance for the generator/refiner.

    Returns:
        The refined argument text.
    """
    # (The identical commented-out copy of this function that preceded it was
    # dead code and has been removed.)
    argument = deployed_argument_generator(query, additional_description)
    criticism = deployed_critic(argument)
    return deployed_refiner(query, additional_description, argument, criticism)


@trace(
Expand All @@ -86,21 +90,21 @@ def deployed_argument_chain_tags_metadata(query: str, additional_description: st


if __name__ == "__main__":
    # Run the plain traced chain and print the refined argument.
    result1 = deployed_argument_chain(
        "Whether coffee is good for you.",
        additional_description="Provide a concise, few sentence argument on why coffee is good for you.",
    )
    print(result1)

    # Run the tagged/metadata variant; it returns the full response plus the
    # trace id. trace_id is currently unused — the feedback-recording snippet
    # that consumed it was removed as dead code (FeedbackRequest is no longer
    # imported); re-import FeedbackRequest to record feedback against it.
    result2, trace_id = deployed_argument_chain_tags_metadata(
        "Whether coffee is good for you.",
        additional_description="Provide a concise, few sentence argument on why coffee is good for you.",
    )
    print(json.dumps(asdict(result2), indent=2))
6 changes: 2 additions & 4 deletions cookbook/parea_llm_proxy/tracing_without_deployed_prompt.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
from dotenv import load_dotenv

from parea import Parea, get_current_trace_id, trace
from parea.schemas import Completion, CompletionResponse, FeedbackRequest, LLMInputs, Message, ModelParams
from parea.schemas import Completion, CompletionResponse, FeedbackRequest, LLMInputs, Message, ModelParams, Role

load_dotenv()

Expand All @@ -21,9 +21,7 @@ def call_llm(
return p.completion(
data=Completion(
llm_configuration=LLMInputs(
model=model,
model_params=ModelParams(temp=temperature),
messages=[Message(**d) for d in data],
model=model, model_params=ModelParams(temp=temperature), messages=[Message(**d) for d in data], history=[Message(role=Role.user, content="Some history")]
)
)
)
Expand Down
1 change: 1 addition & 0 deletions parea/schemas/log.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,7 @@ class LLMInputs:
provider: Optional[str] = None
model_params: Optional[ModelParams] = ModelParams()
messages: Optional[List[Message]] = None
history: Optional[List[Message]] = None
functions: Optional[List[Any]] = None
function_call: Optional[Union[str, Dict[str, str]]] = None

Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "parea-ai"
packages = [{ include = "parea" }]
version = "0.2.189"
description = "Parea python sdk"
readme = "README.md"
authors = ["joel-parea-ai <[email protected]>"]
Expand Down

0 comments on commit 577bd3c

Please sign in to comment.