refactoring to easily use
Yongtae723 committed Oct 10, 2023
1 parent d97fc3c commit 06c0eeb
Showing 6 changed files with 8 additions and 68 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -12,8 +12,8 @@ In recent years, various LLMs, embedding models, and LLM flows utilizing them ha

This repository aims to treat LLMs and Embeddings as hyperparameters, with the goal of automatically searching for the optimal hyperparameters of the LLM flow.

**The image below is the concept image of this repository (the image is taken from [Flowise](https://github.com/FlowiseAI/Flowise) and slightly modified). A LangChain component such as an LLM or Embedding can be treated as a hyperparameter, and the search finds, among the candidates, the component that optimizes the score.**
![concept_image](documents/image/concept.png)
**This image is the concept image of this repository (the image is taken from [Flowise](https://github.com/FlowiseAI/Flowise) and slightly modified). A LangChain component such as an LLM or Embedding can be treated as a hyperparameter, and the search finds, among the candidates, the component that optimizes the score.**

This repository is strongly inspired by [lightning-hydra-template](https://github.com/ashleve/lightning-hydra-template)🎉

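To make the "component as hyperparameter" idea concrete, here is a minimal sketch assuming Hydra-style `_target_` configs (which run.py below instantiates); the candidate targets and field names are illustrative, not the repository's actual config files.

import hydra
from omegaconf import OmegaConf

# Two hypothetical candidate values for the "LLM" hyperparameter
# (actually running them would require an OPENAI_API_KEY).
candidate_a = {"_target_": "langchain.chat_models.ChatOpenAI", "model_name": "gpt-3.5-turbo"}
candidate_b = {"_target_": "langchain.chat_models.ChatOpenAI", "model_name": "gpt-4"}

cfg = OmegaConf.create({"llm": candidate_a})  # the LLM is just a config entry...
llm = hydra.utils.instantiate(cfg.llm)        # ...so an optimizer can swap it like any other hyperparameter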
Empty file.
53 changes: 0 additions & 53 deletions llmflowoptimizer/component/base/base.py

This file was deleted.
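The deleted module's contents are not shown in this view; judging from the imports removed elsewhere in this commit and the get_chain()/evaluate() calls, it presumably defined abstract interfaces roughly like the following sketch, which components no longer need to inherit from.

from abc import ABC, abstractmethod
from typing import Any


class BaseChainModel(ABC):
    """Presumed removed interface: a flow component that returns a chain."""

    @abstractmethod
    def get_chain(self) -> Any:
        ...


class BaseEvaluationModel(ABC):
    """Presumed removed interface: an evaluator that returns a score to optimize."""

    @abstractmethod
    def evaluate(self, model: Any) -> float:
        ...

After this commit, the same two method names act as an informal, duck-typed convention instead.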

8 changes: 3 additions & 5 deletions llmflowoptimizer/component/evaluation/sample.py
@@ -12,10 +12,8 @@
)
from scipy import stats

from llmflowoptimizer.component.base.base import BaseChainModel, BaseEvaluationModel


class Evaluation(BaseEvaluationModel):
class Evaluation:
"""Define the evaluation system. llmflowoptimizer optimizes the hyperparameters of the model
based on the output of this evaluation system.
@@ -58,14 +56,14 @@ def __init__(

def evaluate(
self,
model: BaseChainModel,
model: Any, # this model should be defined in llmflowoptimizer/component/model/sample_qa.py
):
"""Return of this method is used for hyperparameter optimization."""
project_name = self.project_name + datetime.datetime.now().strftime("_%Y-%m-%d_%H-%M-%S")
result = run_on_dataset(
client=self.client,
dataset_name=self.dataset_name,
llm_or_chain_factory=model,
llm_or_chain_factory=model.get_chain(),
evaluation=self.evaluation_config,
project_name=project_name,
input_mapper=lambda x: x,
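Read together with the run.py change below, this moves the responsibility for calling get_chain() from the entry point into the evaluator, so evaluate() now accepts any object that exposes get_chain(). A minimal sketch of the implicit contract (the Protocol is purely illustrative; the repository does not define one):

from typing import Any, Protocol


class SupportsGetChain(Protocol):
    """Illustrative duck-typed interface that Evaluation.evaluate() relies on after this commit."""

    def get_chain(self) -> Any:
        ...


def run_evaluation(evaluator: "Evaluation", model: SupportsGetChain) -> float:
    # Previously run.py unwrapped the chain first (model = model.get_chain());
    # now the component object is passed as-is and the evaluator unwraps it.
    return evaluator.evaluate(model)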
4 changes: 1 addition & 3 deletions llmflowoptimizer/component/model/sample_qa.py
@@ -6,10 +6,8 @@
from langchain.schema.language_model import BaseLanguageModel
from langchain.text_splitter import TextSplitter

from llmflowoptimizer.component.base.base import BaseChainModel


class SampleQA(BaseChainModel):
class SampleQA:
"""Define the flow of the model to be adjusted."""

def __init__(
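With the base class gone, a model component only has to follow the same convention as SampleQA: accept its building blocks in __init__ (so Hydra can inject them from config) and return a chain from get_chain(). A hypothetical alternative component, sketched to show the convention (the class and its chain are illustrative, not part of the repository):

from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel


class MySummarizer:
    """No base class required after this commit; get_chain() is the only expected method."""

    def __init__(self, llm: BaseLanguageModel):
        self.llm = llm

    def get_chain(self):
        prompt = PromptTemplate.from_template("Summarize the following text:\n{text}")
        return LLMChain(llm=self.llm, prompt=prompt)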
9 changes: 3 additions & 6 deletions llmflowoptimizer/run.py
@@ -1,12 +1,10 @@
import logging
from pprint import pprint
from typing import Any, Dict, List, Optional, Tuple
from typing import Optional

import hydra
import rootutils
from omegaconf import DictConfig

from llmflowoptimizer.component.base.base import BaseChainModel, BaseEvaluationModel
from llmflowoptimizer.utils.utils import print_config_tree

log = logging.getLogger(__name__)
@@ -19,14 +17,13 @@
def main(cfg: DictConfig) -> Optional[float]:
"""This is the main entry point of the tune script."""
log.info(f"Instantiating model <{cfg.model._target_}>")
model: BaseChainModel = hydra.utils.instantiate(cfg.model)
model = model.get_chain()
model = hydra.utils.instantiate(cfg.model)

if cfg.extras.print_config:
print_config_tree(cfg)

if cfg.extras.evaluation:
evaluator: BaseEvaluationModel = hydra.utils.instantiate(cfg.evaluation)
evaluator = hydra.utils.instantiate(cfg.evaluation)
log.info(f"Evaluating <{cfg.model._target_}>")
metric_value = evaluator.evaluate(model)
log.info(f"Score is {metric_value}")
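Condensed, the entry point after this commit does no more than instantiate whatever the config names and hand it to the evaluator; returning the score is what lets a Hydra sweeper (for example the Optuna plugin, as in the lightning-hydra-template the README cites) treat main() as the objective to optimize. A sketch assembled from the diff above (logging and the extras checks omitted):

import hydra
from omegaconf import DictConfig


def main_sketch(cfg: DictConfig) -> float:
    model = hydra.utils.instantiate(cfg.model)            # e.g. SampleQA; no base-class type hint needed
    evaluator = hydra.utils.instantiate(cfg.evaluation)   # e.g. Evaluation
    metric_value = evaluator.evaluate(model)              # evaluate() now calls model.get_chain() itself
    return metric_value                                   # a sweeper can maximize or minimize this value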
