From ffd0337f7da0644115972e794cbe431d39597a90 Mon Sep 17 00:00:00 2001
From: Yongtae
Date: Sat, 30 Sep 2023 17:57:03 +0900
Subject: [PATCH] modify image

---
 configs/evaluation/default.yaml                 | 2 +-
 configs/hparams_search/optuna.yaml              | 2 +-
 configs/model/default.yaml                      | 2 +-
 llmflowoptimizer/component/evaluation/sample.py | 9 +++++++--
 4 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/configs/evaluation/default.yaml b/configs/evaluation/default.yaml
index 86c8d1d..9180ab3 100644
--- a/configs/evaluation/default.yaml
+++ b/configs/evaluation/default.yaml
@@ -1,3 +1,3 @@
 _target_: llmflowoptimizer.component.evaluation.sample.Evaluation
 dataset_name: "NYC test"
-project_name: "${task_name} ${now:%Y-%m-%d}_${now:%H-%M-%S}"
+project_name: ${task_name}
diff --git a/configs/hparams_search/optuna.yaml b/configs/hparams_search/optuna.yaml
index b8fbe12..0dd1114 100644
--- a/configs/hparams_search/optuna.yaml
+++ b/configs/hparams_search/optuna.yaml
@@ -42,7 +42,7 @@ hydra:
     # define hyperparameter search space
     params:
       model/text_splitter: choice(RecursiveCharacter, CharacterTextSplitter) # if you want to change component level
-      model.text_splitter.chunk_size: choice(500, 1000, 1500)
+      model.text_splitter.chunk_size: range(500, 1500, 100) # if you want to change specific hyperparameter
       model/llm: choice(OpenAI, GPTTurbo, GPT4)
      # you can also define custom search space for objective function (https://hydra.cc/docs/plugins/optuna_sweeper/#experimental--custom-search-space-optimization)
      # please see ./custom_search_space.py for example
diff --git a/configs/model/default.yaml b/configs/model/default.yaml
index 337ef52..7a237cc 100644
--- a/configs/model/default.yaml
+++ b/configs/model/default.yaml
@@ -6,6 +6,6 @@ defaults:
   - text_splitter: RecursiveCharacter
   - llm: OpenAI
 
-_target_: llmflowoptimizer.component.model.sample_qa.SampleQA # what we defined on llmflowoptimizer/models/sample_qa.py
+_target_: llmflowoptimizer.component.model.sample_qa.SampleQA # what we defined on llmflowoptimizer/component/model/sample_qa.py
 
 data_path: ${paths.reference_data_dir}/nyc_wikipedia.txt
diff --git a/llmflowoptimizer/component/evaluation/sample.py b/llmflowoptimizer/component/evaluation/sample.py
index 5e58d82..33dad3f 100644
--- a/llmflowoptimizer/component/evaluation/sample.py
+++ b/llmflowoptimizer/component/evaluation/sample.py
@@ -1,4 +1,5 @@
-from typing import Any
+import datetime
+from typing import Any, Optional
 
 from langchain.smith import RunEvalConfig, run_on_dataset
 from langsmith import Client
@@ -31,10 +32,12 @@ def __call__(self, input):
     def __init__(
         self,
         dataset_name: str,
+        project_name: Optional[str] = None,
         **kwargs: Any,
     ):
         self.client = Client()
         self.dataset_name = dataset_name
+        self.project_name = project_name
         self.additional_setting = kwargs
 
         # create evaluation chains
@@ -57,12 +60,14 @@ def evaluate(
         self,
         model: BaseChainModel,
     ):
-        # evaluation metrics is calculated by langsmith.
+        """The return value of this method is used for hyperparameter optimization."""
+        project_name = self.project_name + datetime.datetime.now().strftime("_%Y-%m-%d_%H-%M-%S")
         result = run_on_dataset(
             client=self.client,
             dataset_name=self.dataset_name,
             llm_or_chain_factory=model,
             evaluation=self.evaluation_config,
+            project_name=project_name,
             input_mapper=lambda x: x,
             **self.additional_setting,
         )