From 45958515dc0b3fd5b5b0a2675190269eeabb6403 Mon Sep 17 00:00:00 2001 From: Li Yin Date: Thu, 21 Nov 2024 08:31:47 -0800 Subject: [PATCH 01/10] test --- Makefile | 51 +++++++++++++++++++++++++++++++++++++++++++ docs/source/index.rst | 2 +- pyproject.toml | 15 +++++++++---- 3 files changed, 63 insertions(+), 5 deletions(-) create mode 100644 Makefile diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..33b7bce5 --- /dev/null +++ b/Makefile @@ -0,0 +1,51 @@ +# Define variables for common directories and commands +PYTHON = poetry run +SRC_DIR = . + +# Default target: Show help +.PHONY: help +help: + @echo "Available targets:" + @echo " setup Install dependencies and set up pre-commit hooks" + @echo " format Run Black and Ruff to format the code" + @echo " lint Run Ruff to check code quality" + @echo " test Run tests with pytest" + @echo " precommit Run pre-commit hooks on all files" + @echo " clean Clean up temporary files and build artifacts" + +# Install dependencies and set up pre-commit hooks +.PHONY: setup +setup: + poetry install + poetry run pre-commit install + +# Format code using Black and Ruff +.PHONY: format +format: + $(PYTHON) black $(SRC_DIR) + $(PYTHON) ruff check $(SRC_DIR) --fix + +# Run lint checks using Ruff +.PHONY: lint +lint: + $(PYTHON) ruff check $(SRC_DIR) + +# Run all pre-commit hooks on all files +.PHONY: precommit +precommit: + $(PYTHON) pre-commit run --all-files + +# Run tests +.PHONY: test +test: + $(PYTHON) pytest + +# Clean up temporary files and build artifacts +.PHONY: clean +clean: + rm -rf .pytest_cache + rm -rf .mypy_cache + rm -rf __pycache__ + rm -rf build dist *.egg-info + find . -type d -name "__pycache__" -exec rm -r {} + + find . -type f -name "*.pyc" -delete diff --git a/docs/source/index.rst b/docs/source/index.rst index 5f265357..df3e2805 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -281,7 +281,7 @@ Just define it as a ``Parameter`` and pass it to our ``Generator``. Unites Research and Production ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Our team has experience in both AI research and production. +Our team has experience in both AI research and production. test We are building a library that unites the two worlds, forming a healthy LLM application ecosystem. - To resemble the PyTorch library makes it easier for LLM researchers to use the library. 
diff --git a/pyproject.toml b/pyproject.toml index a3b7cd42..ce9be698 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,10 +17,7 @@ packages = [ [tool.poetry.dependencies] python = ">=3.11, <4.0" adalflow = { path = "adalflow", develop = true } -# torch = "^2.3.1" openai = "^1.34.0" -# lightrag = {path = "lightrag/dist/lightrag-0.0.0a11-py3-none-any.whl"} -# lightrag = "^0.0.0a13" [tool.poetry.group.dev.dependencies] @@ -55,4 +52,14 @@ colorama = "^0.4.6" [build-system] requires = ["poetry-core>=1.0.0"] -build-backend = "poetry.core.masonry.api" \ No newline at end of file +build-backend = "poetry.core.masonry.api" + + +# for formatting and linting +[tool.black] +line-length = 88 +target-version = ["py311"] + +[tool.ruff] +extend-ignore = ["E402"] # Ignore module-level import issues +line-length = 88 From f489a4d74a69148bc8a9ad531d0c2447354085bc Mon Sep 17 00:00:00 2001 From: Li Yin Date: Thu, 21 Nov 2024 08:42:39 -0800 Subject: [PATCH 02/10] add black and ruff config in the pyproject toml, add a simple Makefile to help people setup and apply formatting --- .pre-commit-config.yaml | 2 ++ docs/source/index.rst | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ecf5b070..111c91f9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -14,6 +14,7 @@ repos: hooks: - id: black args: ['--line-length=88'] + exclude: ^docs/|.*\.(json|yaml|md|txt)$ - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.4.2 @@ -21,6 +22,7 @@ repos: # Run the linter. - id: ruff args: ['--fix', '--extend-ignore=E402'] + exclude: ^docs/|.*\.(json|yaml|md|txt)$ # - repo: https://github.com/pycqa/flake8 # rev: 4.0.1 # hooks: diff --git a/docs/source/index.rst b/docs/source/index.rst index df3e2805..5f265357 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -281,7 +281,7 @@ Just define it as a ``Parameter`` and pass it to our ``Generator``. Unites Research and Production ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Our team has experience in both AI research and production. test +Our team has experience in both AI research and production. We are building a library that unites the two worlds, forming a healthy LLM application ecosystem. - To resemble the PyTorch library makes it easier for LLM researchers to use the library. From 388e03eec29415122592b6fe32b2c9427a3401a2 Mon Sep 17 00:00:00 2001 From: Li Yin Date: Thu, 21 Nov 2024 12:28:39 -0800 Subject: [PATCH 03/10] improve the contributor message --- docs/source/contributor/index.rst | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/docs/source/contributor/index.rst b/docs/source/contributor/index.rst index e4ea677b..7e2c2da0 100644 --- a/docs/source/contributor/index.rst +++ b/docs/source/contributor/index.rst @@ -1,19 +1,20 @@ Contributor Guide ============================= -Welcome to the AdalFlow community! We're building the most user-friendly, modular, and powerful library for building and auto-optimizing LLM applications, rom Chatbots and RAGs to Agents. +Welcome to the AdalFlow community! We're building the most user-friendly, modular, and powerful library for building and auto-optimizing LLM applications, from Chatbots and RAGs to Agents. *Think of AdalFlow for LLM applications and prompt engineering as the PyTorch/TensorFlow/JAX equivalent for AI modeling.* -The goal is to provide basic and fundamental building blocks to create advanced applications with auto-optimization out of the box. 
+ +The goal of the library is to provide basic and fundamental building blocks to create advanced applications with auto-optimization out of the box. As we mature, we anticipate that more RAG, memory-based chatbots, or agent frameworks will be built on top of AdalFlow’s building blocks, such as `retriever` and `generator`. We highly suggest you read our :ref:`design principle` before you start contributing. -We only accept high quality contributions. -We appreciate contributors, but we have to hold our libary responsible for our users. -Once you decide to contribute, we hope it’s not just to list your name on the repo. -More importantly, we want you to learn and improve your own skills, support your favorite projects, and contribute to the community! +We greatly appreciate all contributions, from bug fixes to new features, and value every contributor. +However, we must be selective to ensure our library remains reliable for users. +We hope your contributions go beyond listing your name on the repo—our goal is for you to learn, grow your skills, support your favorite projects, and give back to the community! +The goal of this guide is to design the best process for maintaining the quality of our library while enabling the community to make meaningful contributions. It took us three months to set up this contributor guide, as we first tested the process with early contributors. -Our goal is to design the best process for maintaining the quality of our library while enabling the community to make meaningful contributions. -We are determined to make AdalFlow as great and legendary as PyTorch. +*We are determined to make AdalFlow as great and legendary as PyTorch.* + This guide covers the overall contributing process, along with development essentials for environment setup, coding, testing, and documentation. Here’s to the future of LLM applications! From 79b0b2ebd47d815863a63479f1ddef6e78b01691 Mon Sep 17 00:00:00 2001 From: fm1320 Date: Fri, 22 Nov 2024 14:45:33 +0000 Subject: [PATCH 04/10] Trigger pre-commit hooks From 3196631c01bd759d5f4464d09d8415864aea8654 Mon Sep 17 00:00:00 2001 From: fm1320 Date: Fri, 22 Nov 2024 14:52:25 +0000 Subject: [PATCH 05/10] Fix issues identified by pre-commit hooks --- adalflow/PACKAGING.md | 4 +- .../model_client/anthropic_client.py | 2 +- .../components/model_client/bedrock_client.py | 64 ++++++++++--------- adalflow/adalflow/optim/optimizer.py | 2 +- adalflow/adalflow/utils/lazy_import.py | 2 +- benchmarks/README.md | 4 +- .../hotpot_dev_v1_simplified_random_100.json | 2 +- .../ReAct_agent/paper_data/paper_dev_10.json | 2 +- benchmarks/ReAct_agent/utils/tools.py | 54 ++++++++++------ .../hotpot_qa/adal_exp/train_vanilla.py | 2 +- docs/Makefile | 2 +- docs/requirements.txt | 2 +- docs/source/_static/css/custom.css | 2 +- docs/source/contributor/version_control.rst | 8 +-- docs/source/resources/resources.rst | 4 -- use_cases/classification/train.py | 2 +- .../bbh/object_count/train_new.py | 2 +- 17 files changed, 88 insertions(+), 72 deletions(-) diff --git a/adalflow/PACKAGING.md b/adalflow/PACKAGING.md index de951f1d..11d87306 100644 --- a/adalflow/PACKAGING.md +++ b/adalflow/PACKAGING.md @@ -33,10 +33,10 @@ pip install "dist/adalflow-0.1.0b1-py3-none-any.whl[openai,groq,faiss]" 1. Update the version in `pyproject.toml` 2. Add the version number in `adalflow/__init__.py` -3. Build the package +3. Build the package 4. Test the package locally 5. Push the changes to the repository 6. 
Ensure to run `poetry lock --no-update` in the root directory (project-level) to update the lock file for other directories such as `tutorials`, `use_cases`, `benchmarks`, etc. 7. Update the `CHANGELOG.md` file with the new version number and the changes made in the new version. -## TODO: we need to automate the version update process. Help is appreciated. \ No newline at end of file +## TODO: we need to automate the version update process. Help is appreciated. diff --git a/adalflow/adalflow/components/model_client/anthropic_client.py b/adalflow/adalflow/components/model_client/anthropic_client.py index 1c83f421..92fa535a 100644 --- a/adalflow/adalflow/components/model_client/anthropic_client.py +++ b/adalflow/adalflow/components/model_client/anthropic_client.py @@ -167,4 +167,4 @@ async def acall( elif model_type == ModelType.LLM: return await self.async_client.messages.create(**api_kwargs) else: - raise ValueError(f"model_type {model_type} is not supported") \ No newline at end of file + raise ValueError(f"model_type {model_type} is not supported") diff --git a/adalflow/adalflow/components/model_client/bedrock_client.py b/adalflow/adalflow/components/model_client/bedrock_client.py index 549c1988..fef08606 100644 --- a/adalflow/adalflow/components/model_client/bedrock_client.py +++ b/adalflow/adalflow/components/model_client/bedrock_client.py @@ -15,17 +15,21 @@ bedrock_runtime_exceptions = boto3.client( service_name="bedrock-runtime", - region_name=os.getenv("AWS_REGION_NAME", "us-east-1") + region_name=os.getenv("AWS_REGION_NAME", "us-east-1"), ).exceptions def get_first_message_content(completion: Dict) -> str: r"""When we only need the content of the first message. It is the default parser for chat completion.""" - return completion['output']['message']['content'][0]['text'] + return completion["output"]["message"]["content"][0]["text"] -__all__ = ["BedrockAPIClient", "get_first_message_content", "bedrock_runtime_exceptions"] +__all__ = [ + "BedrockAPIClient", + "get_first_message_content", + "bedrock_runtime_exceptions", +] class BedrockAPIClient(ModelClient): @@ -34,15 +38,15 @@ class BedrockAPIClient(ModelClient): """ def __init__( - self, - aws_profile_name=None, - aws_region_name=None, - aws_access_key_id=None, - aws_secret_access_key=None, - aws_session_token=None, - aws_connection_timeout=None, - aws_read_timeout=None, - chat_completion_parser: Callable = None, + self, + aws_profile_name=None, + aws_region_name=None, + aws_access_key_id=None, + aws_secret_access_key=None, + aws_session_token=None, + aws_connection_timeout=None, + aws_read_timeout=None, + chat_completion_parser: Callable = None, ): super().__init__() self._aws_profile_name = aws_profile_name @@ -56,7 +60,7 @@ def __init__( self.session = None self.sync_client = self.init_sync_client() self.chat_completion_parser = ( - chat_completion_parser or get_first_message_content + chat_completion_parser or get_first_message_content ) def init_sync_client(self): @@ -67,14 +71,16 @@ def init_sync_client(self): aws_profile_name = self._aws_profile_name or os.getenv("AWS_PROFILE_NAME") aws_region_name = self._aws_region_name or os.getenv("AWS_REGION_NAME") aws_access_key_id = self._aws_access_key_id or os.getenv("AWS_ACCESS_KEY_ID") - aws_secret_access_key = self._aws_secret_access_key or os.getenv("AWS_SECRET_ACCESS_KEY") + aws_secret_access_key = self._aws_secret_access_key or os.getenv( + "AWS_SECRET_ACCESS_KEY" + ) aws_session_token = self._aws_session_token or os.getenv("AWS_SESSION_TOKEN") config = None if 
self._aws_connection_timeout or self._aws_read_timeout: config = Config( connect_timeout=self._aws_connection_timeout, # Connection timeout in seconds - read_timeout=self._aws_read_timeout # Read timeout in seconds + read_timeout=self._aws_read_timeout, # Read timeout in seconds ) session = boto3.Session( @@ -93,7 +99,7 @@ def init_async_client(self): def parse_chat_completion(self, completion): log.debug(f"completion: {completion}") try: - data = completion['output']['message']['content'][0]['text'] + data = completion["output"]["message"]["content"][0]["text"] usage = self.track_completion_usage(completion) return GeneratorOutput(data=None, usage=usage, raw_response=data) except Exception as e: @@ -104,18 +110,18 @@ def parse_chat_completion(self, completion): def track_completion_usage(self, completion: Dict) -> CompletionUsage: r"""Track the completion usage.""" - usage = completion['usage'] + usage = completion["usage"] return CompletionUsage( - completion_tokens=usage['outputTokens'], - prompt_tokens=usage['inputTokens'], - total_tokens=usage['totalTokens'] + completion_tokens=usage["outputTokens"], + prompt_tokens=usage["inputTokens"], + total_tokens=usage["totalTokens"], ) def convert_inputs_to_api_kwargs( - self, - input: Optional[Any] = None, - model_kwargs: Dict = {}, - model_type: ModelType = ModelType.UNDEFINED + self, + input: Optional[Any] = None, + model_kwargs: Dict = {}, + model_type: ModelType = ModelType.UNDEFINED, ): """ check the converse api doc here: @@ -133,11 +139,11 @@ def convert_inputs_to_api_kwargs( @backoff.on_exception( backoff.expo, ( - bedrock_runtime_exceptions.ThrottlingException, - bedrock_runtime_exceptions.ModelTimeoutException, - bedrock_runtime_exceptions.InternalServerException, - bedrock_runtime_exceptions.ModelErrorException, - bedrock_runtime_exceptions.ValidationException + bedrock_runtime_exceptions.ThrottlingException, + bedrock_runtime_exceptions.ModelTimeoutException, + bedrock_runtime_exceptions.InternalServerException, + bedrock_runtime_exceptions.ModelErrorException, + bedrock_runtime_exceptions.ValidationException, ), max_time=5, ) diff --git a/adalflow/adalflow/optim/optimizer.py b/adalflow/adalflow/optim/optimizer.py index b6a68d2a..c6fad814 100644 --- a/adalflow/adalflow/optim/optimizer.py +++ b/adalflow/adalflow/optim/optimizer.py @@ -67,7 +67,7 @@ def __init__( dataset: Sequence[DataClass] = None, exclude_input_fields_from_bootstrap_demos: bool = False, *args, - **kwargs + **kwargs, ): self._weighted = weighted self.dataset = dataset diff --git a/adalflow/adalflow/utils/lazy_import.py b/adalflow/adalflow/utils/lazy_import.py index aa4c988f..ccf79332 100644 --- a/adalflow/adalflow/utils/lazy_import.py +++ b/adalflow/adalflow/utils/lazy_import.py @@ -78,7 +78,7 @@ class LazyImport: """ def __init__( - self, import_path: str, optional_package: OptionalPackages, *args, **kwargs + self, import_path: str, optional_package: OptionalPackages, *args, **kwargs ): if args or kwargs: raise TypeError( diff --git a/benchmarks/README.md b/benchmarks/README.md index 1b7f6824..5ae4aa78 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -1,3 +1,3 @@ -Benchmarking is an integral development part of the project. +Benchmarking is an integral development part of the project. -Contributors are encouraged to write benchmarks for their code, besides of the unit tests in `tests/` directory. \ No newline at end of file +Contributors are encouraged to write benchmarks for their code, besides of the unit tests in `tests/` directory. 
diff --git a/benchmarks/ReAct_agent/paper_data/hotpot_dev_v1_simplified_random_100.json b/benchmarks/ReAct_agent/paper_data/hotpot_dev_v1_simplified_random_100.json index 13abbe21..a105c71e 100644 --- a/benchmarks/ReAct_agent/paper_data/hotpot_dev_v1_simplified_random_100.json +++ b/benchmarks/ReAct_agent/paper_data/hotpot_dev_v1_simplified_random_100.json @@ -499,4 +499,4 @@ "answer": "grand assembly", "type": "bridge" } -] \ No newline at end of file +] diff --git a/benchmarks/ReAct_agent/paper_data/paper_dev_10.json b/benchmarks/ReAct_agent/paper_data/paper_dev_10.json index 2f53d1e7..edada19a 100644 --- a/benchmarks/ReAct_agent/paper_data/paper_dev_10.json +++ b/benchmarks/ReAct_agent/paper_data/paper_dev_10.json @@ -429,4 +429,4 @@ ] ] } -] \ No newline at end of file +] diff --git a/benchmarks/ReAct_agent/utils/tools.py b/benchmarks/ReAct_agent/utils/tools.py index 31a53b27..c0eebd3a 100644 --- a/benchmarks/ReAct_agent/utils/tools.py +++ b/benchmarks/ReAct_agent/utils/tools.py @@ -9,15 +9,17 @@ Apply the similar code for wikipedia search from the Paper (open-source). """ + # copy code from the paper def clean_str(p): - return p.encode().decode("unicode-escape").encode("latin1").decode("utf-8") + return p.encode().decode("unicode-escape").encode("latin1").decode("utf-8") + # normalization copied from the paper's code def normalize_answer(s): def remove_articles(text): return re.sub(r"\b(a|an|the)\b", " ", text) - + def white_space_fix(text): return " ".join(text.split()) @@ -39,29 +41,33 @@ def search(entity: str) -> str: # Format the entity for URL encoding entity_formatted = entity.replace(" ", "+") url = f"https://en.wikipedia.org/w/index.php?search={entity_formatted}" - + # Fetch the page response = requests.get(url) - soup = BeautifulSoup(response.text, 'html.parser') - + soup = BeautifulSoup(response.text, "html.parser") + # Check if the exact page was found or suggest similar items # when
is detected, it means the entity page is not found on wikipedia result_divs = soup.find_all("div", {"class": "mw-search-result-heading"}) - - if result_divs: # this means the searched entity page is not in wikipedia, wikipedia will show a list of similar entities + + if ( + result_divs + ): # this means the searched entity page is not in wikipedia, wikipedia will show a list of similar entities # get Similar results similar_titles = [div.a.get_text() for div in result_divs] - return f"Could not find exact page for '{entity}'. Similar topics: {similar_titles[:5]}" # return the top 5 similar titles + return f"Could not find exact page for '{entity}'. Similar topics: {similar_titles[:5]}" # return the top 5 similar titles else: # the paper uses page to represent content in

# Extract xontent - page_list = [p.get_text().strip() for p in soup.find_all("p") + soup.find_all("ul")] + page_list = [ + p.get_text().strip() for p in soup.find_all("p") + soup.find_all("ul") + ] # TODO: Recursive search, if find any concept that needs more search then call search again # if any("may refer to:" in p for p in page_list): # search(entity) # restructure & clean the page content following the paper's logic - page = '' + page = "" for p in page_list: if len(p.split(" ")) > 2: page += clean_str(p) @@ -69,28 +75,36 @@ def search(entity: str) -> str: page += "\n" paragraphs = page.split("\n") paragraphs = [p.strip() for p in paragraphs if p.strip()] - + sentences = [] for p in paragraphs: - sentences += p.split('. ') - sentences = [s.strip() + '.' for s in sentences if s.strip()] - + sentences += p.split(". ") + sentences = [s.strip() + "." for s in sentences if s.strip()] + # return the first 5 sentences if sentences: - return ' '.join(sentences[:5]) if len(sentences)>=5 else ' '.join(sentences) + return ( + " ".join(sentences[:5]) if len(sentences) >= 5 else " ".join(sentences) + ) else: return "No content found on this page." - + # TODO: clean the paragraphs and return the searched content def lookup(text: str, keyword: str) -> str: """ - returns the sentences containing keyword in the current passage. + returns the sentences containing keyword in the current passage. """ - sentences = text.split('.') - matching_sentences = [sentence.strip() + '.' for sentence in sentences if keyword.lower() in sentence.lower()] + sentences = text.split(".") + matching_sentences = [ + sentence.strip() + "." + for sentence in sentences + if keyword.lower() in sentence.lower() + ] if not matching_sentences: return "No sentences found with the keyword." else: - return ' '.join(matching_sentences) # Join all matching sentences into a single string + return " ".join( + matching_sentences + ) # Join all matching sentences into a single string diff --git a/benchmarks/hotpot_qa/adal_exp/train_vanilla.py b/benchmarks/hotpot_qa/adal_exp/train_vanilla.py index 6e87a990..b6cfe9e6 100644 --- a/benchmarks/hotpot_qa/adal_exp/train_vanilla.py +++ b/benchmarks/hotpot_qa/adal_exp/train_vanilla.py @@ -114,7 +114,7 @@ def train( **gpt_3_model, teacher_model_config=gpt_4o_model, text_optimizer_model_config=gpt_4o_model, - backward_engine_model_config=gpt_4o_model + backward_engine_model_config=gpt_4o_model, ) print(adal_component) trainer = adal.Trainer( diff --git a/docs/Makefile b/docs/Makefile index 7a9152fd..9e4cbe75 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -8,7 +8,7 @@ SOURCEDIR = source BUILDDIR = build APIDOCOUTDIR = $(SOURCEDIR)/apis PYTHON := $(shell command -v python3 2>/dev/null || command -v python 2>/dev/null) -POETRY = poetry +POETRY = poetry # Put it first so that "make" without argument is like "make help". 
help: diff --git a/docs/requirements.txt b/docs/requirements.txt index e59cca03..14ce30d7 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -8,4 +8,4 @@ PyYAML readthedocs-sphinx-search==0.3.2 numpy tqdm -tiktoken \ No newline at end of file +tiktoken diff --git a/docs/source/_static/css/custom.css b/docs/source/_static/css/custom.css index 95445edc..73ee6a04 100644 --- a/docs/source/_static/css/custom.css +++ b/docs/source/_static/css/custom.css @@ -344,4 +344,4 @@ table tr:hover { font-size: 8px; /* Further adjust text size for smallest screens */ } -} \ No newline at end of file +} diff --git a/docs/source/contributor/version_control.rst b/docs/source/contributor/version_control.rst index 456c7528..30c2480e 100644 --- a/docs/source/contributor/version_control.rst +++ b/docs/source/contributor/version_control.rst @@ -7,7 +7,7 @@ Overview -------- **The version will mainly be managed by the LightRAG team. But we are glad to share how we will release the latest version here.** -This guide outlines the process for releasing a new version of ``LightRAG``. +This guide outlines the process for releasing a new version of ``LightRAG``. The workflow pipeline validates the version tag, builds the package, runs tests, publishes to PyPI, and creates a release on GitHub. The workflow is triggered by tags pushed to the **Release** branch. See `GitHub tags `_ for more details on version release tagging. Steps to Release a New Version @@ -18,7 +18,7 @@ Steps to Release a New Version [tool.poetry] name = "lightrag" - + version = "0.0.0-rc.1" description = "The 'PyTorch' library for LLM applications. RAG=Retriever-Agent-Generator." @@ -49,7 +49,7 @@ Steps to Release a New Version git add lightrag/pyproject.toml git commit -m "new version release" git push origin release - + Since the workflow only processes **tags**, your file submission will not go through the version release workflow. Only the tags you pushed will get checked. @@ -66,7 +66,7 @@ Steps to Release a New Version .. 
code-block:: python git tags # list the existing tags - + git tag -d git push origin --delete diff --git a/docs/source/resources/resources.rst b/docs/source/resources/resources.rst index 08f77d35..4affa68e 100644 --- a/docs/source/resources/resources.rst +++ b/docs/source/resources/resources.rst @@ -3,7 +3,3 @@ Resources Please check the GitHub for more information: `GitHub repository `_ - - - - diff --git a/use_cases/classification/train.py b/use_cases/classification/train.py index f287c164..0bdbd562 100644 --- a/use_cases/classification/train.py +++ b/use_cases/classification/train.py @@ -126,7 +126,7 @@ def train( debug=False, max_steps=12, strategy="constrained", - optimization_order="sequential" + optimization_order="sequential", ) # val 0.694 -> 0.833, #test 0.8472 -> 0.833, adding more shots does not help # NOTE: raw: 40, bootstrap: 4, max_steps: 8, strategy: random, val: 86.1, test: 86.8 (+4.2% compared with dspy) diff --git a/use_cases/question_answering/bbh/object_count/train_new.py b/use_cases/question_answering/bbh/object_count/train_new.py index c4c64fbc..280f7c1a 100644 --- a/use_cases/question_answering/bbh/object_count/train_new.py +++ b/use_cases/question_answering/bbh/object_count/train_new.py @@ -111,7 +111,7 @@ def train( **gpt_3_model, teacher_model_config=gpt_4o_model, text_optimizer_model_config=gpt_4o_model, - backward_engine_model_config=gpt_4o_model + backward_engine_model_config=gpt_4o_model, ) print(adal_component) trainer = adal.Trainer( From 3b0e5321a67acfca2ddace575fb67fc091f20f4d Mon Sep 17 00:00:00 2001 From: Li Yin Date: Thu, 21 Nov 2024 12:28:39 -0800 Subject: [PATCH 06/10] improve the contributor message --- docs/source/contributor/index.rst | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/docs/source/contributor/index.rst b/docs/source/contributor/index.rst index e4ea677b..7e2c2da0 100644 --- a/docs/source/contributor/index.rst +++ b/docs/source/contributor/index.rst @@ -1,19 +1,20 @@ Contributor Guide ============================= -Welcome to the AdalFlow community! We're building the most user-friendly, modular, and powerful library for building and auto-optimizing LLM applications, rom Chatbots and RAGs to Agents. +Welcome to the AdalFlow community! We're building the most user-friendly, modular, and powerful library for building and auto-optimizing LLM applications, from Chatbots and RAGs to Agents. *Think of AdalFlow for LLM applications and prompt engineering as the PyTorch/TensorFlow/JAX equivalent for AI modeling.* -The goal is to provide basic and fundamental building blocks to create advanced applications with auto-optimization out of the box. + +The goal of the library is to provide basic and fundamental building blocks to create advanced applications with auto-optimization out of the box. As we mature, we anticipate that more RAG, memory-based chatbots, or agent frameworks will be built on top of AdalFlow’s building blocks, such as `retriever` and `generator`. We highly suggest you read our :ref:`design principle` before you start contributing. -We only accept high quality contributions. -We appreciate contributors, but we have to hold our libary responsible for our users. -Once you decide to contribute, we hope it’s not just to list your name on the repo. -More importantly, we want you to learn and improve your own skills, support your favorite projects, and contribute to the community! +We greatly appreciate all contributions, from bug fixes to new features, and value every contributor. 
+However, we must be selective to ensure our library remains reliable for users. +We hope your contributions go beyond listing your name on the repo—our goal is for you to learn, grow your skills, support your favorite projects, and give back to the community! +The goal of this guide is to design the best process for maintaining the quality of our library while enabling the community to make meaningful contributions. It took us three months to set up this contributor guide, as we first tested the process with early contributors. -Our goal is to design the best process for maintaining the quality of our library while enabling the community to make meaningful contributions. -We are determined to make AdalFlow as great and legendary as PyTorch. +*We are determined to make AdalFlow as great and legendary as PyTorch.* + This guide covers the overall contributing process, along with development essentials for environment setup, coding, testing, and documentation. Here’s to the future of LLM applications! From ab392da1120126ae3de7355474540962b18256e5 Mon Sep 17 00:00:00 2001 From: Li Yin Date: Fri, 22 Nov 2024 07:51:33 -0800 Subject: [PATCH 07/10] add ruff and black as a dev dependencides in root project toml --- poetry.lock | 84 +++++++++++++++++++++++++++++++++++++++++++++++++- pyproject.toml | 2 ++ 2 files changed, 85 insertions(+), 1 deletion(-) diff --git a/poetry.lock b/poetry.lock index 92749681..e4788fc3 100644 --- a/poetry.lock +++ b/poetry.lock @@ -470,6 +470,50 @@ charset-normalizer = ["charset-normalizer"] html5lib = ["html5lib"] lxml = ["lxml"] +[[package]] +name = "black" +version = "24.10.0" +description = "The uncompromising code formatter." +optional = false +python-versions = ">=3.9" +files = [ + {file = "black-24.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e6668650ea4b685440857138e5fe40cde4d652633b1bdffc62933d0db4ed9812"}, + {file = "black-24.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1c536fcf674217e87b8cc3657b81809d3c085d7bf3ef262ead700da345bfa6ea"}, + {file = "black-24.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:649fff99a20bd06c6f727d2a27f401331dc0cc861fb69cde910fe95b01b5928f"}, + {file = "black-24.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:fe4d6476887de70546212c99ac9bd803d90b42fc4767f058a0baa895013fbb3e"}, + {file = "black-24.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5a2221696a8224e335c28816a9d331a6c2ae15a2ee34ec857dcf3e45dbfa99ad"}, + {file = "black-24.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f9da3333530dbcecc1be13e69c250ed8dfa67f43c4005fb537bb426e19200d50"}, + {file = "black-24.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4007b1393d902b48b36958a216c20c4482f601569d19ed1df294a496eb366392"}, + {file = "black-24.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:394d4ddc64782e51153eadcaaca95144ac4c35e27ef9b0a42e121ae7e57a9175"}, + {file = "black-24.10.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b5e39e0fae001df40f95bd8cc36b9165c5e2ea88900167bddf258bacef9bbdc3"}, + {file = "black-24.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d37d422772111794b26757c5b55a3eade028aa3fde43121ab7b673d050949d65"}, + {file = "black-24.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:14b3502784f09ce2443830e3133dacf2c0110d45191ed470ecb04d0f5f6fcb0f"}, + {file = "black-24.10.0-cp312-cp312-win_amd64.whl", hash = 
"sha256:30d2c30dc5139211dda799758559d1b049f7f14c580c409d6ad925b74a4208a8"}, + {file = "black-24.10.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1cbacacb19e922a1d75ef2b6ccaefcd6e93a2c05ede32f06a21386a04cedb981"}, + {file = "black-24.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1f93102e0c5bb3907451063e08b9876dbeac810e7da5a8bfb7aeb5a9ef89066b"}, + {file = "black-24.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ddacb691cdcdf77b96f549cf9591701d8db36b2f19519373d60d31746068dbf2"}, + {file = "black-24.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:680359d932801c76d2e9c9068d05c6b107f2584b2a5b88831c83962eb9984c1b"}, + {file = "black-24.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:17374989640fbca88b6a448129cd1745c5eb8d9547b464f281b251dd00155ccd"}, + {file = "black-24.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:63f626344343083322233f175aaf372d326de8436f5928c042639a4afbbf1d3f"}, + {file = "black-24.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfa1d0cb6200857f1923b602f978386a3a2758a65b52e0950299ea014be6800"}, + {file = "black-24.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:2cd9c95431d94adc56600710f8813ee27eea544dd118d45896bb734e9d7a0dc7"}, + {file = "black-24.10.0-py3-none-any.whl", hash = "sha256:3bb2b7a1f7b685f85b11fed1ef10f8a9148bceb49853e47a294a3dd963c1dd7d"}, + {file = "black-24.10.0.tar.gz", hash = "sha256:846ea64c97afe3bc677b761787993be4991810ecc7a4a937816dd6bddedc4875"}, +] + +[package.dependencies] +click = ">=8.0.0" +mypy-extensions = ">=0.4.3" +packaging = ">=22.0" +pathspec = ">=0.9.0" +platformdirs = ">=2" + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.10)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +uvloop = ["uvloop (>=0.15.2)"] + [[package]] name = "bleach" version = "6.1.0" @@ -3601,6 +3645,17 @@ files = [ qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] testing = ["docopt", "pytest"] +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] + [[package]] name = "pexpect" version = "4.9.0" @@ -4744,6 +4799,33 @@ files = [ {file = "rpds_py-0.20.0.tar.gz", hash = "sha256:d72a210824facfdaf8768cf2d7ca25a042c30320b3020de2fa04640920d4e121"}, ] +[[package]] +name = "ruff" +version = "0.8.0" +description = "An extremely fast Python linter and code formatter, written in Rust." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "ruff-0.8.0-py3-none-linux_armv6l.whl", hash = "sha256:fcb1bf2cc6706adae9d79c8d86478677e3bbd4ced796ccad106fd4776d395fea"}, + {file = "ruff-0.8.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:295bb4c02d58ff2ef4378a1870c20af30723013f441c9d1637a008baaf928c8b"}, + {file = "ruff-0.8.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:7b1f1c76b47c18fa92ee78b60d2d20d7e866c55ee603e7d19c1e991fad933a9a"}, + {file = "ruff-0.8.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb0d4f250a7711b67ad513fde67e8870109e5ce590a801c3722580fe98c33a99"}, + {file = "ruff-0.8.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0e55cce9aa93c5d0d4e3937e47b169035c7e91c8655b0974e61bb79cf398d49c"}, + {file = "ruff-0.8.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f4cd64916d8e732ce6b87f3f5296a8942d285bbbc161acee7fe561134af64f9"}, + {file = "ruff-0.8.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:c5c1466be2a2ebdf7c5450dd5d980cc87c8ba6976fb82582fea18823da6fa362"}, + {file = "ruff-0.8.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2dabfd05b96b7b8f2da00d53c514eea842bff83e41e1cceb08ae1966254a51df"}, + {file = "ruff-0.8.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:facebdfe5a5af6b1588a1d26d170635ead6892d0e314477e80256ef4a8470cf3"}, + {file = "ruff-0.8.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87a8e86bae0dbd749c815211ca11e3a7bd559b9710746c559ed63106d382bd9c"}, + {file = "ruff-0.8.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:85e654f0ded7befe2d61eeaf3d3b1e4ef3894469cd664ffa85006c7720f1e4a2"}, + {file = "ruff-0.8.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:83a55679c4cb449fa527b8497cadf54f076603cc36779b2170b24f704171ce70"}, + {file = "ruff-0.8.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:812e2052121634cf13cd6fddf0c1871d0ead1aad40a1a258753c04c18bb71bbd"}, + {file = "ruff-0.8.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:780d5d8523c04202184405e60c98d7595bdb498c3c6abba3b6d4cdf2ca2af426"}, + {file = "ruff-0.8.0-py3-none-win32.whl", hash = "sha256:5fdb6efecc3eb60bba5819679466471fd7d13c53487df7248d6e27146e985468"}, + {file = "ruff-0.8.0-py3-none-win_amd64.whl", hash = "sha256:582891c57b96228d146725975fbb942e1f30a0c4ba19722e692ca3eb25cc9b4f"}, + {file = "ruff-0.8.0-py3-none-win_arm64.whl", hash = "sha256:ba93e6294e9a737cd726b74b09a6972e36bb511f9a102f1d9a7e1ce94dd206a6"}, + {file = "ruff-0.8.0.tar.gz", hash = "sha256:a7ccfe6331bf8c8dad715753e157457faf7351c2b69f62f32c165c2dbcbacd44"}, +] + [[package]] name = "s3transfer" version = "0.10.2" @@ -6130,4 +6212,4 @@ multidict = ">=4.0" [metadata] lock-version = "2.0" python-versions = ">=3.11, <4.0" -content-hash = "e37147771ba46212166b62327cb83b76012da7ddbcc57aa5088ff7418eef2393" +content-hash = "6f206712012373417ade22f41508b5111eb14b3520a14ac76a125d51718e3a54" diff --git a/pyproject.toml b/pyproject.toml index ce9be698..1f541368 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -48,6 +48,8 @@ faiss-cpu = "^1.8.0.post1" nltk = "^3.9.1" ragas = "^0.1.16" colorama = "^0.4.6" +black = "^24.10.0" +ruff = "^0.8.0" [build-system] From 9461d601d4f361337b09b07b42cd2ba390de5f39 Mon Sep 17 00:00:00 2001 From: Li Yin Date: Fri, 22 Nov 2024 08:26:46 -0800 Subject: [PATCH 08/10] test --- .pre-commit-config.yaml | 21 +- Makefile | 2 +- .../model_client/anthropic_client.py | 3 +- adalflow/tests/test_random_sample.py | 5 
+- ...lflow_object_count_auto_optimization.ipynb | 16077 ++++++++-------- .../tutorials/adalflow_dataclasses.ipynb | 1916 +- pyproject.toml | 6 +- tutorials/component.ipynb | 16 +- tutorials/dataclass.ipynb | 26 +- tutorials/embedder.ipynb | 1 - tutorials/generator.ipynb | 4 +- tutorials/model_client.ipynb | 1 - tutorials/react_note.ipynb | 2 +- tutorials/retriever.ipynb | 5 +- tutorials/tools.ipynb | 67 +- 15 files changed, 9072 insertions(+), 9080 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 111c91f9..50db8611 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,13 +16,22 @@ repos: args: ['--line-length=88'] exclude: ^docs/|.*\.(json|yaml|md|txt)$ - - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.2 + # - repo: https://github.com/astral-sh/ruff-pre-commit + # rev: v0.4.2 + # hooks: + # # Run the linter. + # - id: ruff + # args: ['--fix'] + # exclude: ^docs/|.*\.(json|yaml|md|txt)$ + + # Add local hooks to run custom commands + - repo: local hooks: - # Run the linter. - - id: ruff - args: ['--fix', '--extend-ignore=E402'] - exclude: ^docs/|.*\.(json|yaml|md|txt)$ + - id: run-make-format + name: Run Make Format + entry: make format + language: system + pass_filenames: false # - repo: https://github.com/pycqa/flake8 # rev: 4.0.1 # hooks: diff --git a/Makefile b/Makefile index 33b7bce5..3670e02f 100644 --- a/Makefile +++ b/Makefile @@ -23,7 +23,7 @@ setup: .PHONY: format format: $(PYTHON) black $(SRC_DIR) - $(PYTHON) ruff check $(SRC_DIR) --fix + git ls-files | xargs pre-commit run black --files # Run lint checks using Ruff .PHONY: lint diff --git a/adalflow/adalflow/components/model_client/anthropic_client.py b/adalflow/adalflow/components/model_client/anthropic_client.py index 92fa535a..c6f4a34f 100644 --- a/adalflow/adalflow/components/model_client/anthropic_client.py +++ b/adalflow/adalflow/components/model_client/anthropic_client.py @@ -15,7 +15,8 @@ anthropic = safe_import( OptionalPackages.ANTHROPIC.value[0], OptionalPackages.ANTHROPIC.value[1] ) -import anthropic + +# import anthropic from anthropic import ( RateLimitError, APITimeoutError, diff --git a/adalflow/tests/test_random_sample.py b/adalflow/tests/test_random_sample.py index 3cc6f56f..e6abfb29 100644 --- a/adalflow/tests/test_random_sample.py +++ b/adalflow/tests/test_random_sample.py @@ -1,13 +1,12 @@ import unittest from typing import TypeVar +from adalflow.core.functional import random_sample + # Assuming the random_sample function is defined here or imported T_co = TypeVar("T_co", covariant=True) -from adalflow.core.functional import random_sample - - class TestRandomSample(unittest.TestCase): def setUp(self): diff --git a/notebooks/qas/adalflow_object_count_auto_optimization.ipynb b/notebooks/qas/adalflow_object_count_auto_optimization.ipynb index 65b8509c..ac7e3cbf 100644 --- a/notebooks/qas/adalflow_object_count_auto_optimization.ipynb +++ b/notebooks/qas/adalflow_object_count_auto_optimization.ipynb @@ -1,8121 +1,8120 @@ { - "cells": [ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "VVSOpjzJl_cx" + }, + "source": [ + "# 🤗 Welcome to AdalFlow!\n", + "## The library to build & auto-optimize any LLM task pipelines\n", + "\n", + "Thanks for trying us out, we're here to provide you with the best LLM application development experience you can dream of 😊 any questions or concerns you may have, [come talk to us on discord,](https://discord.gg/ezzszrRZvT) we're always here to help!\n", + "\n", + "\n", + "# Quick Links\n", + "\n", + 
"Github repo: https://github.com/SylphAI-Inc/AdalFlow\n", + "\n", + "Full Tutorials: https://adalflow.sylph.ai/index.html#.\n", + "\n", + "Deep dive on each API: check out the [developer notes](https://adalflow.sylph.ai/tutorials/index.html).\n", + "\n", + "Common use cases along with the auto-optimization: check out [Use cases](https://adalflow.sylph.ai/use_cases/index.html).\n", + "\n", + "# Outline\n", + "\n", + "*Note: As training can consume tokens fast, and the notebook runtime will reset everytime you use, it might be better for you to learn training in your local editor.*\n", + "\n", + "This is a quick introduction of AdalFlow on question answering use case end to end\n", + "\n", + "* Trainable Task pipeline with trainable parameters\n", + "* Create AdalComponent for your task pipeline\n", + "* Use Trainer to diagnose, debug, and to train.\n", + "\n", + "You can find all source code here: https://github.com/SylphAI-Inc/AdalFlow/tree/main/use_cases/question_answering/bhh_object_count\n", + "\n", + "**Here is the more detailed tutorial for the code here: https://adalflow.sylph.ai/use_cases/question_answering.html**\n", + "\n", + "\n", + "# Installation\n", + "\n", + "1. Use `pip` to install the `adalflow` Python package. We will need `openai`, `groq`, and `faiss`(cpu version) from the extra packages.\n", + "\n", + " ```bash\n", + " pip install adalflow[openai,groq,faiss-cpu]\n", + " ```\n", + "2. Setup `openai` and `groq` API key in the environment variables" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "THTvmhjgfiHE" + }, + "outputs": [], + "source": [ + "from IPython.display import clear_output\n", + "\n", + "!pip install -U adalflow[openai,groq,datasets]\n", + "\n", + "clear_output()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 35 + }, + "id": "nJteJKsNrpcu", + "outputId": "d9f7b4d0-d11c-480d-d858-bf9022c18998" + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "VVSOpjzJl_cx" + "data": { + "application/vnd.google.colaboratory.intrinsic+json": { + "type": "string" }, - "source": [ - "# 🤗 Welcome to AdalFlow!\n", - "## The library to build & auto-optimize any LLM task pipelines\n", - "\n", - "Thanks for trying us out, we're here to provide you with the best LLM application development experience you can dream of 😊 any questions or concerns you may have, [come talk to us on discord,](https://discord.gg/ezzszrRZvT) we're always here to help!\n", - "\n", - "\n", - "# Quick Links\n", - "\n", - "Github repo: https://github.com/SylphAI-Inc/AdalFlow\n", - "\n", - "Full Tutorials: https://adalflow.sylph.ai/index.html#.\n", - "\n", - "Deep dive on each API: check out the [developer notes](https://adalflow.sylph.ai/tutorials/index.html).\n", - "\n", - "Common use cases along with the auto-optimization: check out [Use cases](https://adalflow.sylph.ai/use_cases/index.html).\n", - "\n", - "# Outline\n", - "\n", - "*Note: As training can consume tokens fast, and the notebook runtime will reset everytime you use, it might be better for you to learn training in your local editor.*\n", - "\n", - "This is a quick introduction of AdalFlow on question answering use case end to end\n", - "\n", - "* Trainable Task pipeline with trainable parameters\n", - "* Create AdalComponent for your task pipeline\n", - "* Use Trainer to diagnose, debug, and to train.\n", - "\n", - "You can find all source code here: 
https://github.com/SylphAI-Inc/AdalFlow/tree/main/use_cases/question_answering/bhh_object_count\n", - "\n", - "**Here is the more detailed tutorial for the code here: https://adalflow.sylph.ai/use_cases/question_answering.html**\n", - "\n", - "\n", - "# Installation\n", - "\n", - "1. Use `pip` to install the `adalflow` Python package. We will need `openai`, `groq`, and `faiss`(cpu version) from the extra packages.\n", - "\n", - " ```bash\n", - " pip install adalflow[openai,groq,faiss-cpu]\n", - " ```\n", - "2. Setup `openai` and `groq` API key in the environment variables" + "text/plain": [ + "'0.2.0'" ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import adalflow as adal\n", + "\n", + "adal.__version__" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "KapUyHMM07pJ" + }, + "source": [ + "## Set Environment Variables\n", + "\n", + "Run the following code and pass your api key.\n", + "\n", + "Note: for normal `.py` projects, follow our [official installation guide](https://lightrag.sylph.ai/get_started/installation.html).\n", + "\n", + "*Go to [OpenAI](https://platform.openai.com/docs/introduction) and [Groq](https://console.groq.com/docs/) to get API keys if you don't already have.*" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "ONfzF9Puzdd_", + "outputId": "6a815e21-ab99-463e-c53b-e39ca2ce8f3f" + }, + "outputs": [ { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "THTvmhjgfiHE" - }, - "outputs": [], - "source": [ - "from IPython.display import clear_output\n", - "\n", - "!pip install -U adalflow[openai,groq,datasets]\n", - "\n", - "clear_output()" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "Please enter your OpenAI API key: ··········\n", + "Please enter your GROQ API key: ··········\n", + "API keys have been set.\n" + ] + } + ], + "source": [ + "import os\n", + "\n", + "from getpass import getpass\n", + "\n", + "# Prompt user to enter their API keys securely\n", + "openai_api_key = getpass(\"Please enter your OpenAI API key: \")\n", + "groq_api_key = getpass(\"Please enter your GROQ API key, simplly press Enter if you don't have one: \")\n", + "\n", + "\n", + "# Set environment variables\n", + "os.environ['OPENAI_API_KEY'] = openai_api_key\n", + "os.environ['GROQ_API_KEY'] = groq_api_key\n", + "\n", + "print(\"API keys have been set.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SfGS7iddtfpj" + }, + "source": [ + "\n", + "\n", + "# 😇 Trainable Task Pipeline\n", + "\n", + "We will create a task pipeline consists of a generator, with a customzied template, a customized output parser.\n", + "\n", + "Different from our other pipelines where the `prompt_kwargs` values are strings, but here we will use ``Parameter``. And we will set up two parameter, one is of ``ParameterType.PROMPT`` and the other of type ``ParameterType.DEMOS``. 
The first one will be trained by text-grad and the second will be trained by boostrap few shot optimizer.\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "nHnvAbO-pXUq" + }, + "outputs": [], + "source": [ + "import adalflow as adal\n", + "import re\n", + "from typing import Dict, Union\n", + "import adalflow as adal\n", + "from adalflow.optim.types import ParameterType\n", + "\n", + "\n", + "@adal.fun_to_component\n", + "def parse_integer_answer(answer: str):\n", + " \"\"\"A function that parses the last integer from a string using regular expressions.\"\"\"\n", + " try:\n", + " # Use regular expression to find all sequences of digits\n", + " numbers = re.findall(r\"\\d+\", answer)\n", + " if numbers:\n", + " # Get the last number found\n", + " answer = int(numbers[-1])\n", + " else:\n", + " answer = -1\n", + " except ValueError:\n", + " answer = -1\n", + "\n", + " return answer\n", + "\n", + "\n", + "few_shot_template = r\"\"\"\n", + "{{system_prompt}}\n", + "{# Few shot demos #}\n", + "{% if few_shot_demos is not none %}\n", + "Here are some examples:\n", + "{{few_shot_demos}}\n", + "{% endif %}\n", + "\n", + "\n", + "{{input_str}}\n", + "\n", + "\"\"\"\n", + "\n", + "class ObjectCountTaskPipeline(adal.Component):\n", + " def __init__(self, model_client: adal.ModelClient, model_kwargs: Dict):\n", + " super().__init__()\n", + "\n", + " system_prompt = adal.Parameter(\n", + " data=\"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\",\n", + " role_desc=\"To give task instruction to the language model in the system prompt\",\n", + " requires_opt=True,\n", + " param_type=ParameterType.PROMPT,\n", + " )\n", + " few_shot_demos = adal.Parameter(\n", + " data=None,\n", + " role_desc=\"To provide few shot demos to the language model\",\n", + " requires_opt=True, # Changed to True for few-shot learning\n", + " param_type=ParameterType.DEMOS,\n", + " )\n", + "\n", + " self.llm_counter = adal.Generator(\n", + " model_client=model_client,\n", + " model_kwargs=model_kwargs,\n", + " template=few_shot_template,\n", + " prompt_kwargs={\n", + " \"system_prompt\": system_prompt,\n", + " \"few_shot_demos\": few_shot_demos,\n", + " },\n", + " output_processors=parse_integer_answer,\n", + " use_cache=True,\n", + " )\n", + "\n", + " def call(\n", + " self, question: str, id: str = None\n", + " ) -> Union[adal.GeneratorOutput, adal.Parameter]:\n", + " output = self.llm_counter(prompt_kwargs={\"input_str\": question}, id=id)\n", + " return output\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "AvZJjdzZa0cT" + }, + "source": [ + "Next, we will run this pipeline in both train and eval mode.\n", + "\n", + "#### Eval mode with GeneratorOutput\n", + "\n", + "Eval mode will output ``GeneratorOutput``.\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Gks3yS8hcR6_" + }, + "source": [ + "\n", + "#### Train mode with different form of output\n", + "\n", + "Train mode will return ``Parameter``, where the `data` field will be the `raw_response`` from the GeneratorOutput, and we put the full GeneratorOutput at the ``full_response`` in the parameter.\n", + "\n", + "As the `data` field of the `Parameter` directly communicate with the Optimizer, which are an LLM itself, its better than they understand exactly the string response itself instead of the parsed one.\n", + "\n", + "Later you 
will see that we also use ``eval_input`` of the parameter to communicate with the `LossFunction` as that need the parsed final output." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "eqQSFnZOpfWJ", + "outputId": "05b5fc83-09d1-45f4-aacc-6d460fbdd7bd" + }, + "outputs": [ { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 35 - }, - "id": "nJteJKsNrpcu", - "outputId": "d9f7b4d0-d11c-480d-d858-bf9022c18998" - }, - "outputs": [ - { - "data": { - "application/vnd.google.colaboratory.intrinsic+json": { - "type": "string" - }, - "text/plain": [ - "'0.2.0'" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import adalflow as adal\n", - "\n", - "adal.__version__" - ] + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:adalflow.core.generator:Error copying the prompt_kwargs: 'prompt' is not a valid ParameterType\n" + ] }, { - "cell_type": "markdown", - "metadata": { - "id": "KapUyHMM07pJ" - }, - "source": [ - "## Set Environment Variables\n", - "\n", - "Run the following code and pass your api key.\n", - "\n", - "Note: for normal `.py` projects, follow our [official installation guide](https://lightrag.sylph.ai/get_started/installation.html).\n", - "\n", - "*Go to [OpenAI](https://platform.openai.com/docs/introduction) and [Groq](https://console.groq.com/docs/) to get API keys if you don't already have.*" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-3.5-turbo.db\n", + "ObjectCountTaskPipeline(\n", + " (llm_counter): Generator(\n", + " model_kwargs={'model': 'gpt-3.5-turbo', 'max_tokens': 2000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': None}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + " )\n", + ")\n" + ] + } + ], + "source": [ + "from adalflow.components.model_client.openai_client import OpenAIClient\n", + "from adalflow.components.model_client.groq_client import GroqAPIClient\n", + "\n", + "\n", + "if len(os.environ['OPENAI_API_KEY']) > 1:\n", + " gpt_3_model = {\n", + " \"model_client\": OpenAIClient(),\n", + " \"model_kwargs\": {\n", + " \"model\": \"gpt-3.5-turbo\",\n", + " \"max_tokens\": 2000,\n", + " \"temperature\": 0.0,\n", + " \"top_p\": 0.99,\n", + " \"frequency_penalty\": 0,\n", + " \"presence_penalty\": 0,\n", + " \"stop\": None,\n", + " },\n", + " }\n", + " gpt_4o_model = {\n", + " \"model_client\": OpenAIClient(),\n", + " \"model_kwargs\": {\n", + " \"model\": \"gpt-4o\",\n", + " \"max_tokens\": 4000,\n", + " \"temperature\": 0.0,\n", + " \"top_p\": 0.99,\n", + " \"frequency_penalty\": 0,\n", + " \"presence_penalty\": 0,\n", + " \"stop\": None,\n", + " },\n", + " }\n", + "\n", + "if len(os.environ['GROQ_API_KEY']) > 1:\n", + " llama_3_1_model ={\n", + " \"model_client\": GroqAPIClient(),\n", + " \"model_kwargs\": {\n", + " \"model\": \"llama-3.1-8b-instant\"\n", + " }\n", + " }\n", + "\n", + "\n", + "question = \"I have a flute, a piano, a trombone, four stoves, a violin, an accordion, a clarinet, a drum, two lamps, and a trumpet. How many musical instruments do I have?\"\n", + "task_pipeline = ObjectCountTaskPipeline(**gpt_3_model)\n", + "print(task_pipeline)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "DE1xNdYvcXw8", + "outputId": "25844c2a-5d4c-4c68-8ca5-38b79ca5b398" + }, + "outputs": [ { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "ONfzF9Puzdd_", - "outputId": "6a815e21-ab99-463e-c53b-e39ca2ce8f3f" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Please enter your OpenAI API key: ··········\n", - "Please enter your GROQ API key: ··········\n", - "API keys have been set.\n" - ] - } - ], - "source": [ - "import os\n", - "\n", - "from getpass import getpass\n", - "\n", - "# Prompt user to enter their API keys securely\n", - "openai_api_key = getpass(\"Please enter your OpenAI API key: \")\n", - "groq_api_key = getpass(\"Please enter your GROQ API key, simplly press Enter if you don't have one: \")\n", - "\n", - "\n", - "# Set environment variables\n", - "os.environ['OPENAI_API_KEY'] = openai_api_key\n", - "os.environ['GROQ_API_KEY'] = groq_api_key\n", - "\n", - "print(\"API keys have been set.\")" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "GeneratorOutput(id='1', data=8, error=None, usage=CompletionUsage(completion_tokens=77, prompt_tokens=113, total_tokens=190), raw_response='To find the total number of musical instruments you have, you simply need to count the individual instruments you listed. 
\\n\\nYou have:\\n- Flute\\n- Piano\\n- Trombone\\n- Violin\\n- Accordion\\n- Clarinet\\n- Drum\\n- Trumpet\\n\\nCounting each of these instruments, we get a total of 8 musical instruments.\\n\\nAnswer: 8', metadata=None)\n" + ] + } + ], + "source": [ + "answer = task_pipeline(question, id=\"1\")\n", + "print(answer)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "AGUlUsGxcaby", + "outputId": "8c8588fe-2994-4d9e-c2d1-26453141f43f" + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "SfGS7iddtfpj" - }, - "source": [ - "\n", - "\n", - "# 😇 Trainable Task Pipeline\n", - "\n", - "We will create a task pipeline consists of a generator, with a customzied template, a customized output parser.\n", - "\n", - "Different from our other pipelines where the `prompt_kwargs` values are strings, but here we will use ``Parameter``. And we will set up two parameter, one is of ``ParameterType.PROMPT`` and the other of type ``ParameterType.DEMOS``. The first one will be trained by text-grad and the second will be trained by boostrap few shot optimizer.\n", - "\n", - "\n" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "Parameter(name=Generator_output, requires_opt=True, param_type=generator_output (The output of the generator.), role_desc=Output from (llm) Generator, data=To find the total number of musical instruments you have, you simply need to count the individual instruments you listed. \n", + "\n", + "You have:\n", + "- Flute\n", + "- Piano\n", + "- Trombone\n", + "- Violin\n", + "- Accordion\n", + "- Clarinet\n", + "- Drum\n", + "- Trumpet\n", + "\n", + "Counting each of these instruments, we get a total of 8 musical instruments.\n", + "\n", + "Answer: 8, predecessors={Parameter(name=To_provide, requires_opt=True, param_type=demos (A few examples to guide the language model.), role_desc=To provide few shot demos to the language model, data=None, predecessors=set(), gradients=[], raw_response=None, input_args=None, traces={}), Parameter(name=To_give_ta, requires_opt=True, param_type=prompt (Instruction to the language model on task, data, and format.), role_desc=To give task instruction to the language model in the system prompt, data=You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value., predecessors=set(), gradients=[], raw_response=None, input_args=None, traces={})}, gradients=[], raw_response=None, input_args={'prompt_kwargs': {'system_prompt': Parameter(name=To_give_ta, requires_opt=True, param_type=prompt (Instruction to the language model on task, data, and format.), role_desc=To give task instruction to the language model in the system prompt, data=You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value., predecessors=set(), gradients=[], raw_response=None, input_args=None, traces={}), 'few_shot_demos': Parameter(name=To_provide, requires_opt=True, param_type=demos (A few examples to guide the language model.), role_desc=To provide few shot demos to the language model, data=None, predecessors=set(), gradients=[], raw_response=None, input_args=None, traces={}), 'input_str': 'I have a flute, a piano, a trombone, four stoves, a violin, an accordion, a clarinet, a drum, two lamps, and a trumpet. 
How many musical instruments do I have?'}, 'model_kwargs': {'model': 'gpt-3.5-turbo', 'max_tokens': 2000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}, traces={})\n", + "full_response: GeneratorOutput(id=None, data=8, error=None, usage=CompletionUsage(completion_tokens=77, prompt_tokens=113, total_tokens=190), raw_response='To find the total number of musical instruments you have, you simply need to count the individual instruments you listed. \\n\\nYou have:\\n- Flute\\n- Piano\\n- Trombone\\n- Violin\\n- Accordion\\n- Clarinet\\n- Drum\\n- Trumpet\\n\\nCounting each of these instruments, we get a total of 8 musical instruments.\\n\\nAnswer: 8', metadata=None)\n" + ] + } + ], + "source": [ + "# set it to train mode\n", + "task_pipeline.train()\n", + "answer = task_pipeline(question, id=\"1\")\n", + "print(answer)\n", + "print(f\"full_response: {answer.full_response}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "YDAiuFzcr4YA" + }, + "outputs": [], + "source": [ + "!pip install datasets\n", + "clear_output()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-Gvfcy2IcgWx" + }, + "source": [ + "### Load Datasets" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "AYBIGsIHpjMe" + }, + "outputs": [], + "source": [ + "from adalflow.datasets.big_bench_hard import BigBenchHard\n", + "from adalflow.utils.data import subset_dataset\n", + "\n", + "def load_datasets(max_samples: int = None):\n", + " \"\"\"Load the dataset\"\"\"\n", + " train_data = BigBenchHard(split=\"train\")\n", + " val_data = BigBenchHard(split=\"val\")\n", + " test_data = BigBenchHard(split=\"test\")\n", + "\n", + " # Limit the number of samples\n", + " if max_samples:\n", + " train_data = subset_dataset(train_data, max_samples)\n", + " val_data = subset_dataset(val_data, max_samples)\n", + " test_data = subset_dataset(test_data, max_samples)\n", + "\n", + " return train_data, val_data, test_data\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "asw-pJrid8ly", + "outputId": "31807c34-0de9-45e5-ebdd-778aa5313802" + }, + "outputs": [ { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "nHnvAbO-pXUq" - }, - "outputs": [], - "source": [ - "import adalflow as adal\n", - "import re\n", - "from typing import Dict, Union\n", - "import adalflow as adal\n", - "from adalflow.optim.types import ParameterType\n", - "\n", - "\n", - "@adal.fun_to_component\n", - "def parse_integer_answer(answer: str):\n", - " \"\"\"A function that parses the last integer from a string using regular expressions.\"\"\"\n", - " try:\n", - " # Use regular expression to find all sequences of digits\n", - " numbers = re.findall(r\"\\d+\", answer)\n", - " if numbers:\n", - " # Get the last number found\n", - " answer = int(numbers[-1])\n", - " else:\n", - " answer = -1\n", - " except ValueError:\n", - " answer = -1\n", - "\n", - " return answer\n", - "\n", - "\n", - "few_shot_template = r\"\"\"\n", - "{{system_prompt}}\n", - "{# Few shot demos #}\n", - "{% if few_shot_demos is not none %}\n", - "Here are some examples:\n", - "{{few_shot_demos}}\n", - "{% endif %}\n", - "\n", - "\n", - "{{input_str}}\n", - "\n", - "\"\"\"\n", - "\n", - "class ObjectCountTaskPipeline(adal.Component):\n", - " def __init__(self, model_client: adal.ModelClient, model_kwargs: Dict):\n", - " super().__init__()\n", - "\n", - 
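The class continues below by declaring its two trainable parameters. As a standalone sketch of that pattern — using only the `adal.Parameter` API exactly as it appears in this notebook:

```python
import adalflow as adal
from adalflow.optim.types import ParameterType

# PROMPT-typed parameters hold instruction text; the text-gradient optimizer
# rewrites their `data` field between training steps.
system_prompt = adal.Parameter(
    data="You will answer a reasoning question. Think step by step.",
    role_desc="To give task instruction to the language model in the system prompt",
    requires_opt=True,
    param_type=ParameterType.PROMPT,
)

# DEMOS-typed parameters start empty; the bootstrap few-shot optimizer fills
# them with traced examples from the teacher model.
few_shot_demos = adal.Parameter(
    data=None,
    role_desc="To provide few shot demos to the language model",
    requires_opt=True,
    param_type=ParameterType.DEMOS,
)
```

Passing these `Parameter` objects in `prompt_kwargs`, instead of plain strings, is what makes the generator trainable.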
" system_prompt = adal.Parameter(\n", - " data=\"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\",\n", - " role_desc=\"To give task instruction to the language model in the system prompt\",\n", - " requires_opt=True,\n", - " param_type=ParameterType.PROMPT,\n", - " )\n", - " few_shot_demos = adal.Parameter(\n", - " data=None,\n", - " role_desc=\"To provide few shot demos to the language model\",\n", - " requires_opt=True, # Changed to True for few-shot learning\n", - " param_type=ParameterType.DEMOS,\n", - " )\n", - "\n", - " self.llm_counter = adal.Generator(\n", - " model_client=model_client,\n", - " model_kwargs=model_kwargs,\n", - " template=few_shot_template,\n", - " prompt_kwargs={\n", - " \"system_prompt\": system_prompt,\n", - " \"few_shot_demos\": few_shot_demos,\n", - " },\n", - " output_processors=parse_integer_answer,\n", - " use_cache=True,\n", - " )\n", - "\n", - " def call(\n", - " self, question: str, id: str = None\n", - " ) -> Union[adal.GeneratorOutput, adal.Parameter]:\n", - " output = self.llm_counter(prompt_kwargs={\"input_str\": question}, id=id)\n", - " return output\n", - "\n", - "\n" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "Example(id='d3f33ded-170a-4b87-9b0b-987d5fb7b817', question='I have a cauliflower, a stalk of celery, a cabbage, and a garlic. How many vegetables do I have?', answer='4')\n" + ] + } + ], + "source": [ + "# check the datasets\n", + "\n", + "train_data, val_data, test_data = load_datasets(max_samples=2)\n", + "print(train_data[0])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "VAVtXE9xeEHt" + }, + "source": [ + "### Soft link to AdalFlow default file path\n", + "\n", + "Lets' match the default to the current project, so that you can see the downloaded data and later the checkpoints of the training." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "1SaKH6dkeWus" + }, + "outputs": [], + "source": [ + "! ln -s /root/.adalflow /content/adalflow\n", + "\n", + "# go to files then you will see a folder named as adalflow" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "YWZzOvAHenME" + }, + "source": [ + "# 😊 AdalComponent to define everything we need to train\n", + "\n", + "1. We need `backward_engine_model_config`` for ``backward_engine`` to compute gradient.\n", + "\n", + "2. We need ``text_optimizer_model_config`` for the `text optimizer` for propose new prompts.\n", + "\n", + "3. For the demo optimizer, we need a `teacher_model_config` to config a teacher generator, in this case, it is the `llm_counter`. The teacher will share the same prompt with the `llm_counter` but you can use a more advanced model.\n", + "\n", + "In general, we should have all of these parts to use a more advanced model." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9QoNoMWD0rgV" + }, + "source": [ + "## 🧑 Diagnose\n", + "\n", + "Diagnose is more of an evaluation, but with detailed logs so that you can manually inspect the wrong output.\n", + "\n", + "This one shows the minimum config you need to get the `diagnose` work." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "6mi7lM3U24Eg" + }, + "outputs": [], + "source": [ + "from adalflow.datasets.types import Example\n", + "from adalflow.eval.answer_match_acc import AnswerMatchAcc\n", + "\n", + "\n", + "class ObjectCountAdalComponent(adal.AdalComponent):\n", + " def __init__(self, model_client: adal.ModelClient, model_kwargs: Dict):\n", + " task = ObjectCountTaskPipeline(model_client, model_kwargs)\n", + " eval_fn = AnswerMatchAcc(type=\"exact_match\").compute_single_item\n", + " super().__init__(task=task, eval_fn=eval_fn)\n", + "\n", + " def prepare_task(self, sample: Example):\n", + " return self.task.call, {\"question\": sample.question, \"id\": sample.id}\n", + "\n", + " def prepare_eval(\n", + " self, sample: Example, y_pred: adal.GeneratorOutput\n", + " ) -> float:\n", + " y_label = -1\n", + " if (y_pred is not None and y_pred.data is not None): # if y_pred and y_pred.data: might introduce bug when the data is 0\n", + " y_label = y_pred.data\n", + " return self.eval_fn, {\"y\": y_label, \"y_gt\": sample.answer}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "eliPeVeM2wcP" + }, + "outputs": [], + "source": [ + "def diagnose(\n", + " model_client: adal.ModelClient,\n", + " model_kwargs: Dict,\n", + ") -> Dict:\n", + "\n", + " trainset, valset, testset = load_datasets()\n", + " # use max_samples=10 to test the code\n", + "\n", + " adal_component = ObjectCountAdalComponent(model_client, model_kwargs)\n", + " trainer = adal.Trainer(adaltask=adal_component)\n", + " trainer.diagnose(dataset=trainset, split=\"train\")\n", + " trainer.diagnose(dataset=valset, split=\"val\")\n", + " trainer.diagnose(dataset=testset, split=\"test\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "nKl9clcb3dFj", + "outputId": "676fbb96-c70b-40ab-ea15-93ade1aa9e66" + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "AvZJjdzZa0cT" - }, - "source": [ - "Next, we will run this pipeline in both train and eval mode.\n", - "\n", - "#### Eval mode with GeneratorOutput\n", - "\n", - "Eval mode will output ``GeneratorOutput``.\n", - "\n" - ] + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:adalflow.core.generator:Error copying the prompt_kwargs: 'prompt' is not a valid ParameterType\n" + ] }, { - "cell_type": "markdown", - "metadata": { - "id": "Gks3yS8hcR6_" - }, - "source": [ - "\n", - "#### Train mode with different form of output\n", - "\n", - "Train mode will return ``Parameter``, where the `data` field will be the `raw_response`` from the GeneratorOutput, and we put the full GeneratorOutput at the ``full_response`` in the parameter.\n", - "\n", - "As the `data` field of the `Parameter` directly communicate with the Optimizer, which are an LLM itself, its better than they understand exactly the string response itself instead of the parsed one.\n", - "\n", - "Later you will see that we also use ``eval_input`` of the parameter to communicate with the `LossFunction` as that need the parsed final output." 
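To make the train/eval contrast concrete, here is a small sketch using the pipeline from this notebook; the attributes (`data`, `raw_response`, `full_response`) are the ones visible in the printed outputs:

```python
# Eval mode (the default): the pipeline returns a GeneratorOutput.
output = task_pipeline(question, id="1")
print(output.data)          # parsed integer answer, e.g. 8
print(output.raw_response)  # full reasoning string from the model

# Train mode: the same call returns a Parameter instead.
task_pipeline.train()
param = task_pipeline(question, id="1")
print(param.data)                # raw response string — what the LLM-based optimizer reads
print(param.full_response.data)  # parsed answer — what the loss/eval side consumes
```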
- ] + "name": "stdout", + "output_type": "stream", + "text": [ + "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-3.5-turbo.db\n", + "Checkpoint path: /root/.adalflow/ckpt/ObjectCountAdalComponent\n", + "Save diagnose to /root/.adalflow/ckpt/ObjectCountAdalComponent/diagnose_train\n", + "Saving traces to /root/.adalflow/ckpt/ObjectCountAdalComponent/diagnose_train\n", + "all_generators: [('llm_counter', Generator(\n", + " model_kwargs={'model': 'gpt-3.5-turbo', 'max_tokens': 2000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': None}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + "))]\n", + "Registered callback for llm_counter, file path: /root/.adalflow/ckpt/ObjectCountAdalComponent/diagnose_train/llm_counter_call.jsonl\n" + ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "eqQSFnZOpfWJ", - "outputId": "05b5fc83-09d1-45f4-aacc-6d460fbdd7bd" - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:adalflow.core.generator:Error copying the prompt_kwargs: 'prompt' is not a valid ParameterType\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-3.5-turbo.db\n", - "ObjectCountTaskPipeline(\n", - " (llm_counter): Generator(\n", - " model_kwargs={'model': 'gpt-3.5-turbo', 'max_tokens': 2000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': None}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - " )\n", - ")\n" - ] - } - ], - "source": [ - "from adalflow.components.model_client.openai_client import OpenAIClient\n", - "from adalflow.components.model_client.groq_client import GroqAPIClient\n", - "\n", - "\n", - "if len(os.environ['OPENAI_API_KEY']) > 1:\n", - " gpt_3_model = {\n", - " \"model_client\": OpenAIClient(),\n", - " \"model_kwargs\": {\n", - " \"model\": \"gpt-3.5-turbo\",\n", - " \"max_tokens\": 2000,\n", - " \"temperature\": 0.0,\n", - " \"top_p\": 0.99,\n", - " \"frequency_penalty\": 0,\n", - " \"presence_penalty\": 0,\n", - " \"stop\": None,\n", - " },\n", - " }\n", - " gpt_4o_model = {\n", - " \"model_client\": OpenAIClient(),\n", - " \"model_kwargs\": {\n", - " \"model\": \"gpt-4o\",\n", - " \"max_tokens\": 4000,\n", - " \"temperature\": 0.0,\n", - " \"top_p\": 0.99,\n", - " \"frequency_penalty\": 0,\n", - " \"presence_penalty\": 0,\n", - " \"stop\": None,\n", - " },\n", - " }\n", - "\n", - "if len(os.environ['GROQ_API_KEY']) > 1:\n", - " llama_3_1_model ={\n", - " \"model_client\": GroqAPIClient(),\n", - " \"model_kwargs\": {\n", - " \"model\": \"llama-3.1-8b-instant\"\n", - " }\n", - " }\n", - "\n", - "\n", - "question = \"I have a flute, a piano, a trombone, four stoves, a violin, an accordion, a clarinet, a drum, two lamps, and a trumpet. How many musical instruments do I have?\"\n", - "task_pipeline = ObjectCountTaskPipeline(**gpt_3_model)\n", - "print(task_pipeline)\n" - ] + "name": "stderr", + "output_type": "stream", + "text": [ + "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 5957.82it/s]\n", + "Evaluating step(0): 0.88 across 50 samples, Max potential: 0.88: 100%|██████████| 50/50 [00:15<00:00, 3.27it/s]\n" + ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "DE1xNdYvcXw8", - "outputId": "25844c2a-5d4c-4c68-8ca5-38b79ca5b398" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "GeneratorOutput(id='1', data=8, error=None, usage=CompletionUsage(completion_tokens=77, prompt_tokens=113, total_tokens=190), raw_response='To find the total number of musical instruments you have, you simply need to count the individual instruments you listed. 
\\n\\nYou have:\\n- Flute\\n- Piano\\n- Trombone\\n- Violin\\n- Accordion\\n- Clarinet\\n- Drum\\n- Trumpet\\n\\nCounting each of these instruments, we get a total of 8 musical instruments.\\n\\nAnswer: 8', metadata=None)\n" - ] - } - ], - "source": [ - "answer = task_pipeline(question, id=\"1\")\n", - "print(answer)" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "sorted_indices: [8, 16, 23, 25, 31, 47, 0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 24, 26, 27, 28, 29, 30, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 48, 49]\n", + "sorted_scores: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n", + "Loading log file: llm_counter_call.jsonl\n", + "Total error samples: 6\n", + "Save diagnose to /root/.adalflow/ckpt/ObjectCountAdalComponent/diagnose_val\n", + "Saving traces to /root/.adalflow/ckpt/ObjectCountAdalComponent/diagnose_val\n", + "all_generators: [('llm_counter', Generator(\n", + " model_kwargs={'model': 'gpt-3.5-turbo', 'max_tokens': 2000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': None}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + "))]\n", + "Registered callback for llm_counter, file path: /root/.adalflow/ckpt/ObjectCountAdalComponent/diagnose_val/llm_counter_call.jsonl\n" + ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "AGUlUsGxcaby", - "outputId": "8c8588fe-2994-4d9e-c2d1-26453141f43f" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Parameter(name=Generator_output, requires_opt=True, param_type=generator_output (The output of the generator.), role_desc=Output from (llm) Generator, data=To find the total number of musical instruments you have, you simply need to count the individual instruments you listed. 
\n", - "\n", - "You have:\n", - "- Flute\n", - "- Piano\n", - "- Trombone\n", - "- Violin\n", - "- Accordion\n", - "- Clarinet\n", - "- Drum\n", - "- Trumpet\n", - "\n", - "Counting each of these instruments, we get a total of 8 musical instruments.\n", - "\n", - "Answer: 8, predecessors={Parameter(name=To_provide, requires_opt=True, param_type=demos (A few examples to guide the language model.), role_desc=To provide few shot demos to the language model, data=None, predecessors=set(), gradients=[], raw_response=None, input_args=None, traces={}), Parameter(name=To_give_ta, requires_opt=True, param_type=prompt (Instruction to the language model on task, data, and format.), role_desc=To give task instruction to the language model in the system prompt, data=You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value., predecessors=set(), gradients=[], raw_response=None, input_args=None, traces={})}, gradients=[], raw_response=None, input_args={'prompt_kwargs': {'system_prompt': Parameter(name=To_give_ta, requires_opt=True, param_type=prompt (Instruction to the language model on task, data, and format.), role_desc=To give task instruction to the language model in the system prompt, data=You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value., predecessors=set(), gradients=[], raw_response=None, input_args=None, traces={}), 'few_shot_demos': Parameter(name=To_provide, requires_opt=True, param_type=demos (A few examples to guide the language model.), role_desc=To provide few shot demos to the language model, data=None, predecessors=set(), gradients=[], raw_response=None, input_args=None, traces={}), 'input_str': 'I have a flute, a piano, a trombone, four stoves, a violin, an accordion, a clarinet, a drum, two lamps, and a trumpet. How many musical instruments do I have?'}, 'model_kwargs': {'model': 'gpt-3.5-turbo', 'max_tokens': 2000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}, traces={})\n", - "full_response: GeneratorOutput(id=None, data=8, error=None, usage=CompletionUsage(completion_tokens=77, prompt_tokens=113, total_tokens=190), raw_response='To find the total number of musical instruments you have, you simply need to count the individual instruments you listed. 
\\n\\nYou have:\\n- Flute\\n- Piano\\n- Trombone\\n- Violin\\n- Accordion\\n- Clarinet\\n- Drum\\n- Trumpet\\n\\nCounting each of these instruments, we get a total of 8 musical instruments.\\n\\nAnswer: 8', metadata=None)\n" - ] - } - ], - "source": [ - "# set it to train mode\n", - "task_pipeline.train()\n", - "answer = task_pipeline(question, id=\"1\")\n", - "print(answer)\n", - "print(f\"full_response: {answer.full_response}\")" - ] + "name": "stderr", + "output_type": "stream", + "text": [ + "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 3203.76it/s]\n", + "Evaluating step(0): 0.8 across 50 samples, Max potential: 0.8: 100%|██████████| 50/50 [00:15<00:00, 3.26it/s]\n" + ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "YDAiuFzcr4YA" - }, - "outputs": [], - "source": [ - "!pip install datasets\n", - "clear_output()" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "sorted_indices: [1, 2, 5, 10, 24, 36, 38, 42, 44, 47, 0, 3, 4, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 39, 40, 41, 43, 45, 46, 48, 49]\n", + "sorted_scores: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n", + "Loading log file: llm_counter_call.jsonl\n", + "Total error samples: 10\n", + "Save diagnose to /root/.adalflow/ckpt/ObjectCountAdalComponent/diagnose_test\n", + "Saving traces to /root/.adalflow/ckpt/ObjectCountAdalComponent/diagnose_test\n", + "all_generators: [('llm_counter', Generator(\n", + " model_kwargs={'model': 'gpt-3.5-turbo', 'max_tokens': 2000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': None}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + "))]\n", + "Registered callback for llm_counter, file path: /root/.adalflow/ckpt/ObjectCountAdalComponent/diagnose_test/llm_counter_call.jsonl\n" + ] }, { - "cell_type": "markdown", - "metadata": { - "id": "-Gvfcy2IcgWx" - }, - "source": [ - "### Load Datasets" - ] + "name": "stderr", + "output_type": "stream", + "text": [ + "Loading Data: 100%|██████████| 100/100 [00:00<00:00, 5545.09it/s]\n", + "Evaluating step(0): 0.83 across 100 samples, Max potential: 0.83: 100%|██████████| 100/100 [00:28<00:00, 3.50it/s]" + ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "AYBIGsIHpjMe" - }, - "outputs": [], - "source": [ - "from adalflow.datasets.big_bench_hard import BigBenchHard\n", - "from adalflow.utils.data import subset_dataset\n", - "\n", - "def load_datasets(max_samples: int = None):\n", - " \"\"\"Load the dataset\"\"\"\n", - " train_data = BigBenchHard(split=\"train\")\n", - " val_data = BigBenchHard(split=\"val\")\n", - " test_data = BigBenchHard(split=\"test\")\n", - "\n", - " # Limit the number of samples\n", - " if max_samples:\n", - " train_data = subset_dataset(train_data, max_samples)\n", - " val_data = subset_dataset(val_data, max_samples)\n", - " test_data = subset_dataset(test_data, max_samples)\n", - "\n", - " return train_data, val_data, test_data\n" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "sorted_indices: [7, 18, 19, 20, 23, 24, 25, 43, 58, 59, 63, 74, 75, 79, 85, 97, 99, 0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 21, 22, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 60, 61, 62, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 76, 77, 78, 80, 81, 82, 83, 84, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 98]\n", + "sorted_scores: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n", + "Loading log file: llm_counter_call.jsonl\n", + "Total error samples: 17\n" + ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "asw-pJrid8ly", - "outputId": "31807c34-0de9-45e5-ebdd-778aa5313802" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Example(id='d3f33ded-170a-4b87-9b0b-987d5fb7b817', question='I have a cauliflower, a stalk of celery, a cabbage, and a garlic. 
How many vegetables do I have?', answer='4')\n" - ] - } - ], - "source": [ - "# check the datasets\n", - "\n", - "train_data, val_data, test_data = load_datasets(max_samples=2)\n", - "print(train_data[0])" - ] + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "diagnose(**gpt_3_model)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dSu4VQri3y3D" + }, + "source": [ + "Now, you can go to `/content/adalflow/ckpt/ObjectCountAdalComponent/diagnose_train/stats.json` to view the average score for each split. And also the `diagnose.json` for different errors.\n", + "\n", + "Here is the overall score for each split.\n", + "\n", + "| Train | Val| Test |\n", + "|:--------- |:--------:| ---------:|\n", + "| 0.88 | 0.8 | 0.83 |\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1vzJyp-W0z7I" + }, + "source": [ + "## 🐛 Debug" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TmlCvJu804dJ" + }, + "source": [ + "## ✅ Train\n", + "\n", + "Now, let's start training." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "4TWCn0did6-K" + }, + "outputs": [], + "source": [ + "from adalflow.datasets.types import Example\n", + "\n", + "\n", + "class ObjectCountAdalComponent(adal.AdalComponent):# noqa: F811\n", + " def __init__(\n", + " self,\n", + " model_client: adal.ModelClient,\n", + " model_kwargs: Dict,\n", + " backward_engine_model_config: Dict,\n", + " teacher_model_config: Dict,\n", + " text_optimizer_model_config: Dict,\n", + " ):\n", + " task = ObjectCountTaskPipeline(model_client, model_kwargs)\n", + " eval_fn = AnswerMatchAcc(type=\"exact_match\").compute_single_item\n", + " loss_fn = adal.EvalFnToTextLoss(\n", + " eval_fn=eval_fn,\n", + " eval_fn_desc=\"exact_match: 1 if str(y) == str(y_gt) else 0\",\n", + " )\n", + " super().__init__(task=task, eval_fn=eval_fn, loss_fn=loss_fn)\n", + "\n", + " self.backward_engine_model_config = backward_engine_model_config\n", + " self.teacher_model_config = teacher_model_config\n", + " self.text_optimizer_model_config = text_optimizer_model_config\n", + "\n", + " def prepare_task(self, sample: Example):\n", + " return self.task.call, {\"question\": sample.question, \"id\": sample.id}\n", + "\n", + "\n", + " def prepare_eval(\n", + " self, sample: Example, y_pred: adal.GeneratorOutput\n", + " ) -> float:\n", + " y_label = -1\n", + " if (y_pred is not None and y_pred.data is not None): # if y_pred and y_pred.data: might introduce bug when the data is 0\n", + " y_label = y_pred.data\n", + " return self.eval_fn, {\"y\": y_label, \"y_gt\": sample.answer}\n", + "\n", + " def prepare_loss(self, sample: Example, pred: adal.Parameter):\n", + " # prepare gt parameter\n", + " y_gt = adal.Parameter(\n", + " name=\"y_gt\",\n", + " data=sample.answer,\n", + " eval_input=sample.answer,\n", + " requires_opt=False,\n", + " )\n", + "\n", + " # pred's full_response is the output of the task pipeline which is GeneratorOutput\n", + " pred.eval_input = pred.full_response.data\n", + " return self.loss_fn, {\"kwargs\": {\"y\": pred, \"y_gt\": y_gt}}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "dezwX2yn1eQS" + }, + "outputs": [], + "source": [ + "def train(\n", + " train_batch_size=4, # larger batch size is not that effective, probably because of llm's lost in the middle\n", + " raw_shots: int = 0,\n", + " bootstrap_shots: int = 1,\n", + " max_steps=1,\n", + " num_workers=4,\n", + " strategy=\"random\",\n", + " 
optimization_order=\"sequential\",\n", + " debug=False,\n", + " resume_from_ckpt=None,\n", + " exclude_input_fields_from_bootstrap_demos=False,\n", + "):\n", + " adal_component = ObjectCountAdalComponent(\n", + " **gpt_3_model,\n", + " teacher_model_config=gpt_4o_model,\n", + " text_optimizer_model_config=gpt_4o_model,\n", + " backward_engine_model_config=gpt_4o_model\n", + " )\n", + " print(adal_component)\n", + " trainer = adal.Trainer(\n", + " train_batch_size=train_batch_size,\n", + " adaltask=adal_component,\n", + " strategy=strategy,\n", + " max_steps=max_steps,\n", + " num_workers=num_workers,\n", + " raw_shots=raw_shots,\n", + " bootstrap_shots=bootstrap_shots,\n", + " debug=debug,\n", + " weighted_sampling=True,\n", + " optimization_order=optimization_order,\n", + " exclude_input_fields_from_bootstrap_demos=exclude_input_fields_from_bootstrap_demos,\n", + " )\n", + " print(trainer)\n", + "\n", + " train_dataset, val_dataset, test_dataset = load_datasets()\n", + " trainer.fit(\n", + " train_dataset=train_dataset,\n", + " val_dataset=val_dataset,\n", + " test_dataset=test_dataset,\n", + " debug=debug,\n", + " resume_from_ckpt=resume_from_ckpt,\n", + " )\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "NGKYozGt60Pp" + }, + "source": [ + "We use `Sequential` in default, we will end up with 24 steps in total, 12 for text optimizer and 12 for the demo optimizer." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "yDwLwL0L7Rsw", + "outputId": "1b7e413b-a1d3-4388-fc0c-ca4b1c072585" + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "VAVtXE9xeEHt" - }, - "source": [ - "### Soft link to AdalFlow default file path\n", - "\n", - "Lets' match the default to the current project, so that you can see the downloaded data and later the checkpoints of the training." - ] + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:adalflow.core.generator:Error copying the prompt_kwargs: 'prompt' is not a valid ParameterType\n" + ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "1SaKH6dkeWus" - }, - "outputs": [], - "source": [ - "! 
ln -s /root/.adalflow /content/adalflow\n", - "\n", - "# go to files then you will see a folder named as adalflow" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-3.5-turbo.db\n", + "ObjectCountAdalComponent(\n", + " eval_fn: compute_single_item, backward_engine: None, backward_engine_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}, teacher_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}, text_optimizer_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}\n", + " (task): ObjectCountTaskPipeline(\n", + " (llm_counter): Generator(\n", + " model_kwargs={'model': 'gpt-3.5-turbo', 'max_tokens': 2000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': None}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + " )\n", + " )\n", + " (loss_fn): EvalFnToTextLoss()\n", + ")\n", + "Trainer(\n", + " (adaltask): ObjectCountAdalComponent(\n", + " eval_fn: compute_single_item, backward_engine: None, backward_engine_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}, teacher_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}, text_optimizer_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}\n", + " (task): ObjectCountTaskPipeline(\n", + " (llm_counter): Generator(\n", + " model_kwargs={'model': 'gpt-3.5-turbo', 'max_tokens': 2000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': None}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + " )\n", + " )\n", + " (loss_fn): EvalFnToTextLoss()\n", + " )\n", + ")\n", + "raw_shots: 0, bootstrap_shots: 1\n", + "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", + "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", + "Configuring teacher generator for Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + ")\n", + "Teacher generator set: Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + "), teacher Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + ")\n", + "Teacher generator configured.\n", + "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", + "Backward engine configured for all generators.\n" + ] }, { - "cell_type": "markdown", - "metadata": { - "id": "YWZzOvAHenME" - }, - "source": [ - "# 😊 AdalComponent to define everything we need to train\n", - "\n", - "1. We need `backward_engine_model_config`` for ``backward_engine`` to compute gradient.\n", - "\n", - "2. We need ``text_optimizer_model_config`` for the `text optimizer` for propose new prompts.\n", - "\n", - "3. For the demo optimizer, we need a `teacher_model_config` to config a teacher generator, in this case, it is the `llm_counter`. The teacher will share the same prompt with the `llm_counter` but you can use a more advanced model.\n", - "\n", - "In general, we should have all of these parts to use a more advanced model." - ] + "name": "stderr", + "output_type": "stream", + "text": [ + "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 6482.70it/s]\n", + "Evaluating step(0): 0.8 across 50 samples, Max potential: 0.8: 100%|██████████| 50/50 [00:00<00:00, 347.01it/s]\n", + "Loading Data: 100%|██████████| 100/100 [00:00<00:00, 2017.67it/s]\n", + "Evaluating step(0): 0.83 across 100 samples, Max potential: 0.83: 100%|██████████| 100/100 [00:00<00:00, 286.59it/s]\n" + ] }, { - "cell_type": "markdown", - "metadata": { - "id": "9QoNoMWD0rgV" - }, - "source": [ - "## 🧑 Diagnose\n", - "\n", - "Diagnose is more of an evaluation, but with detailed logs so that you can manually inspect the wrong output.\n", - "\n", - "This one shows the minimum config you need to get the `diagnose` work." 
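Once a diagnose pass has run, each split's scores sit in plain JSON on disk. Here is a hedged sketch for collecting them afterwards — the folder layout matches the paths printed in the logs, while the exact fields inside `stats.json` are an assumption to verify against the file itself:

```python
import json
from pathlib import Path

# One folder per split, as printed in the logs:
# diagnose_train, diagnose_val, diagnose_test.
ckpt_dir = Path("/root/.adalflow/ckpt/ObjectCountAdalComponent")
for split in ("train", "val", "test"):
    stats_path = ckpt_dir / f"diagnose_{split}" / "stats.json"
    if stats_path.exists():
        # Field names inside the file are not documented here; print and inspect.
        print(split, json.loads(stats_path.read_text()))
```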
- ] + "name": "stdout", + "output_type": "stream", + "text": [ + "Initial validation score: 0.8\n", + "Initial test score: 0.83\n", + "Checkpoint path: /root/.adalflow/ckpt/ObjectCountAdalComponent\n", + "save to /root/.adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" + ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "6mi7lM3U24Eg" - }, - "outputs": [], - "source": [ - "from adalflow.datasets.types import Example\n", - "from adalflow.eval.answer_match_acc import AnswerMatchAcc\n", - "\n", - "\n", - "class ObjectCountAdalComponent(adal.AdalComponent):\n", - " def __init__(self, model_client: adal.ModelClient, model_kwargs: Dict):\n", - " task = ObjectCountTaskPipeline(model_client, model_kwargs)\n", - " eval_fn = AnswerMatchAcc(type=\"exact_match\").compute_single_item\n", - " super().__init__(task=task, eval_fn=eval_fn)\n", - "\n", - " def prepare_task(self, sample: Example):\n", - " return self.task.call, {\"question\": sample.question, \"id\": sample.id}\n", - "\n", - " def prepare_eval(\n", - " self, sample: Example, y_pred: adal.GeneratorOutput\n", - " ) -> float:\n", - " y_label = -1\n", - " if (y_pred is not None and y_pred.data is not None): # if y_pred and y_pred.data: might introduce bug when the data is 0\n", - " y_label = y_pred.data\n", - " return self.eval_fn, {\"y\": y_label, \"y_gt\": sample.answer}" - ] + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Step: 1: 0%| | 0/13 [00:00 Dict:\n", - "\n", - " trainset, valset, testset = load_datasets()\n", - " # use max_samples=10 to test the code\n", - "\n", - " adal_component = ObjectCountAdalComponent(model_client, model_kwargs)\n", - " trainer = adal.Trainer(adaltask=adal_component)\n", - " trainer.diagnose(dataset=trainset, split=\"train\")\n", - " trainer.diagnose(dataset=valset, split=\"val\")\n", - " trainer.diagnose(dataset=testset, split=\"test\")" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "Skipping batch 0 as acc: 1.0\n", + "No proposal can improve the subset and full set, go to next step\n" + ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "nKl9clcb3dFj", - "outputId": "676fbb96-c70b-40ab-ea15-93ade1aa9e66" - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:adalflow.core.generator:Error copying the prompt_kwargs: 'prompt' is not a valid ParameterType\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-3.5-turbo.db\n", - "Checkpoint path: /root/.adalflow/ckpt/ObjectCountAdalComponent\n", - "Save diagnose to /root/.adalflow/ckpt/ObjectCountAdalComponent/diagnose_train\n", - "Saving traces to /root/.adalflow/ckpt/ObjectCountAdalComponent/diagnose_train\n", - "all_generators: [('llm_counter', Generator(\n", - " model_kwargs={'model': 'gpt-3.5-turbo', 'max_tokens': 2000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': None}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - "))]\n", - "Registered callback for llm_counter, file path: /root/.adalflow/ckpt/ObjectCountAdalComponent/diagnose_train/llm_counter_call.jsonl\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 5957.82it/s]\n", - "Evaluating step(0): 0.88 across 50 samples, Max potential: 0.88: 100%|██████████| 50/50 [00:15<00:00, 3.27it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "sorted_indices: [8, 16, 23, 25, 31, 47, 0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 24, 26, 27, 28, 29, 30, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 48, 49]\n", - "sorted_scores: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n", - "Loading log file: llm_counter_call.jsonl\n", - "Total error samples: 6\n", - "Save diagnose to /root/.adalflow/ckpt/ObjectCountAdalComponent/diagnose_val\n", - "Saving traces to /root/.adalflow/ckpt/ObjectCountAdalComponent/diagnose_val\n", - "all_generators: [('llm_counter', Generator(\n", - " model_kwargs={'model': 'gpt-3.5-turbo', 'max_tokens': 2000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': None}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - "))]\n", - "Registered callback for llm_counter, file path: /root/.adalflow/ckpt/ObjectCountAdalComponent/diagnose_val/llm_counter_call.jsonl\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 3203.76it/s]\n", - "Evaluating step(0): 0.8 across 50 samples, Max potential: 0.8: 100%|██████████| 50/50 [00:15<00:00, 3.26it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "sorted_indices: [1, 2, 5, 10, 24, 36, 38, 42, 44, 47, 0, 3, 4, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 39, 40, 41, 43, 45, 46, 48, 49]\n", - "sorted_scores: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n", - "Loading log file: llm_counter_call.jsonl\n", - "Total error samples: 10\n", - "Save diagnose to /root/.adalflow/ckpt/ObjectCountAdalComponent/diagnose_test\n", - "Saving traces to /root/.adalflow/ckpt/ObjectCountAdalComponent/diagnose_test\n", - "all_generators: [('llm_counter', Generator(\n", - " model_kwargs={'model': 'gpt-3.5-turbo', 'max_tokens': 2000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': None}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - "))]\n", - "Registered callback for llm_counter, file path: /root/.adalflow/ckpt/ObjectCountAdalComponent/diagnose_test/llm_counter_call.jsonl\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Loading Data: 100%|██████████| 100/100 [00:00<00:00, 5545.09it/s]\n", - "Evaluating step(0): 0.83 across 100 samples, Max potential: 0.83: 100%|██████████| 100/100 [00:28<00:00, 3.50it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "sorted_indices: [7, 18, 19, 20, 23, 24, 25, 43, 58, 59, 63, 74, 75, 79, 85, 97, 99, 0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 21, 22, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 60, 61, 62, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 76, 77, 78, 80, 81, 82, 83, 84, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 98]\n", - "sorted_scores: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n", - "Loading log file: llm_counter_call.jsonl\n", - "Total error samples: 17\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n" - ] - } - ], - "source": [ - "diagnose(**gpt_3_model)" - ] + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 384.73it/s]\n", + "Training: 100%|██████████| 4/4 [00:00<00:00, 927.64it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 754.71it/s]\n", + "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 12087.33it/s]\n", + "Training Step: 3: 15%|█▌ | 2/13 [00:00<00:01, 8.92it/s]" + ] }, { - "cell_type": "markdown", - "metadata": { - "id": "dSu4VQri3y3D" - }, - "source": [ - "Now, you can go to `/content/adalflow/ckpt/ObjectCountAdalComponent/diagnose_train/stats.json` to view the average score for each split. 
And also the `diagnose.json` for different errors.\n", - "\n", - "Here is the overall score for each split.\n", - "\n", - "| Train | Val| Test |\n", - "|:--------- |:--------:| ---------:|\n", - "| 0.88 | 0.8 | 0.83 |\n", - "\n" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "Skipping batch 1 as acc: 1.0\n", + "No proposal can improve the subset and full set, go to next step\n" + ] }, { - "cell_type": "markdown", - "metadata": { - "id": "1vzJyp-W0z7I" - }, - "source": [ - "## 🐛 Debug" - ] + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 193.44it/s]\n", + "Training: 100%|██████████| 4/4 [00:00<00:00, 2761.68it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 810.38it/s]\n", + "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 11320.66it/s]\n", + "Training Step: 4: 15%|█▌ | 2/13 [00:00<00:01, 8.92it/s]" + ] }, { - "cell_type": "markdown", - "metadata": { - "id": "TmlCvJu804dJ" - }, - "source": [ - "## ✅ Train\n", - "\n", - "Now, let's start training." - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "Skipping batch 2 as acc: 1.0\n", + "No proposal can improve the subset and full set, go to next step\n" + ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "4TWCn0did6-K" - }, - "outputs": [], - "source": [ - "from adalflow.datasets.types import Example\n", - "from adalflow.eval.answer_match_acc import AnswerMatchAcc\n", - "\n", - "\n", - "class ObjectCountAdalComponent(adal.AdalComponent):\n", - " def __init__(\n", - " self,\n", - " model_client: adal.ModelClient,\n", - " model_kwargs: Dict,\n", - " backward_engine_model_config: Dict,\n", - " teacher_model_config: Dict,\n", - " text_optimizer_model_config: Dict,\n", - " ):\n", - " task = ObjectCountTaskPipeline(model_client, model_kwargs)\n", - " eval_fn = AnswerMatchAcc(type=\"exact_match\").compute_single_item\n", - " loss_fn = adal.EvalFnToTextLoss(\n", - " eval_fn=eval_fn,\n", - " eval_fn_desc=\"exact_match: 1 if str(y) == str(y_gt) else 0\",\n", - " )\n", - " super().__init__(task=task, eval_fn=eval_fn, loss_fn=loss_fn)\n", - "\n", - " self.backward_engine_model_config = backward_engine_model_config\n", - " self.teacher_model_config = teacher_model_config\n", - " self.text_optimizer_model_config = text_optimizer_model_config\n", - "\n", - " def prepare_task(self, sample: Example):\n", - " return self.task.call, {\"question\": sample.question, \"id\": sample.id}\n", - "\n", - "\n", - " def prepare_eval(\n", - " self, sample: Example, y_pred: adal.GeneratorOutput\n", - " ) -> float:\n", - " y_label = -1\n", - " if (y_pred is not None and y_pred.data is not None): # if y_pred and y_pred.data: might introduce bug when the data is 0\n", - " y_label = y_pred.data\n", - " return self.eval_fn, {\"y\": y_label, \"y_gt\": sample.answer}\n", - "\n", - " def prepare_loss(self, sample: Example, pred: adal.Parameter):\n", - " # prepare gt parameter\n", - " y_gt = adal.Parameter(\n", - " name=\"y_gt\",\n", - " data=sample.answer,\n", - " eval_input=sample.answer,\n", - " requires_opt=False,\n", - " )\n", - "\n", - " # pred's full_response is the output of the task pipeline which is GeneratorOutput\n", - " pred.eval_input = pred.full_response.data\n", - " return self.loss_fn, {\"kwargs\": {\"y\": pred, \"y_gt\": y_gt}}" - ] + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 234.44it/s]\n", + "Training: 100%|██████████| 4/4 
[00:00<00:00, 2487.72it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 1024.88it/s]\n", + "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 12018.06it/s]\n", + "Training Step: 5: 31%|███ | 4/13 [00:00<00:00, 11.90it/s]" + ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "dezwX2yn1eQS" - }, - "outputs": [], - "source": [ - "def train(\n", - " train_batch_size=4, # larger batch size is not that effective, probably because of llm's lost in the middle\n", - " raw_shots: int = 0,\n", - " bootstrap_shots: int = 1,\n", - " max_steps=1,\n", - " num_workers=4,\n", - " strategy=\"random\",\n", - " optimization_order=\"sequential\",\n", - " debug=False,\n", - " resume_from_ckpt=None,\n", - " exclude_input_fields_from_bootstrap_demos=False,\n", - "):\n", - " adal_component = ObjectCountAdalComponent(\n", - " **gpt_3_model,\n", - " teacher_model_config=gpt_4o_model,\n", - " text_optimizer_model_config=gpt_4o_model,\n", - " backward_engine_model_config=gpt_4o_model\n", - " )\n", - " print(adal_component)\n", - " trainer = adal.Trainer(\n", - " train_batch_size=train_batch_size,\n", - " adaltask=adal_component,\n", - " strategy=strategy,\n", - " max_steps=max_steps,\n", - " num_workers=num_workers,\n", - " raw_shots=raw_shots,\n", - " bootstrap_shots=bootstrap_shots,\n", - " debug=debug,\n", - " weighted_sampling=True,\n", - " optimization_order=optimization_order,\n", - " exclude_input_fields_from_bootstrap_demos=exclude_input_fields_from_bootstrap_demos,\n", - " )\n", - " print(trainer)\n", - "\n", - " train_dataset, val_dataset, test_dataset = load_datasets()\n", - " trainer.fit(\n", - " train_dataset=train_dataset,\n", - " val_dataset=val_dataset,\n", - " test_dataset=test_dataset,\n", - " debug=debug,\n", - " resume_from_ckpt=resume_from_ckpt,\n", - " )\n" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "Skipping batch 3 as acc: 1.0\n", + "No proposal can improve the subset and full set, go to next step\n" + ] }, { - "cell_type": "markdown", - "metadata": { - "id": "NGKYozGt60Pp" - }, - "source": [ - "We use `Sequential` in default, we will end up with 24 steps in total, 12 for text optimizer and 12 for the demo optimizer." 
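For reference, the run in these logs comes from the `train` helper defined above; `max_steps=12` and `strategy="constrained"` are inferred from the `constrained_max_steps_12_*` checkpoint name printed in the output, so treat this call as a reconstruction rather than the exact original command:

```python
# Sequential order: 12 text-gradient steps, then 12 demo steps (24 total).
train(
    max_steps=12,
    strategy="constrained",          # inferred from the checkpoint filename
    optimization_order="sequential",
)
```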
- ] + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 133.95it/s]\n", + "Training: 100%|██████████| 4/4 [00:00<00:00, 4552.84it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 392.05it/s]\n", + "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 770.69it/s]\n" + ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "yDwLwL0L7Rsw", - "outputId": "1b7e413b-a1d3-4388-fc0c-ca4b1c072585" - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:adalflow.core.generator:Error copying the prompt_kwargs: 'prompt' is not a valid ParameterType\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-3.5-turbo.db\n", - "ObjectCountAdalComponent(\n", - " eval_fn: compute_single_item, backward_engine: None, backward_engine_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}, teacher_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}, text_optimizer_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}\n", - " (task): ObjectCountTaskPipeline(\n", - " (llm_counter): Generator(\n", - " model_kwargs={'model': 'gpt-3.5-turbo', 'max_tokens': 2000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': None}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - " )\n", - " )\n", - " (loss_fn): EvalFnToTextLoss()\n", - ")\n", - "Trainer(\n", - " (adaltask): ObjectCountAdalComponent(\n", - " eval_fn: compute_single_item, backward_engine: None, backward_engine_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}, teacher_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}, text_optimizer_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}\n", - " (task): ObjectCountTaskPipeline(\n", - " (llm_counter): Generator(\n", - " model_kwargs={'model': 'gpt-3.5-turbo', 'max_tokens': 2000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': None}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - " )\n", - " )\n", - " (loss_fn): EvalFnToTextLoss()\n", - " )\n", - ")\n", - "raw_shots: 0, bootstrap_shots: 1\n", - "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", - "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", - "Configuring teacher generator for Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - ")\n", - "Teacher generator set: Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - "), teacher Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - ")\n", - "Teacher generator configured.\n", - "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", - "Backward engine configured for all generators.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 6482.70it/s]\n", - "Evaluating step(0): 0.8 across 50 samples, Max potential: 0.8: 100%|██████████| 50/50 [00:00<00:00, 347.01it/s]\n", - "Loading Data: 100%|██████████| 100/100 [00:00<00:00, 2017.67it/s]\n", - "Evaluating step(0): 0.83 across 100 samples, Max potential: 0.83: 100%|██████████| 100/100 [00:00<00:00, 286.59it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Initial validation score: 0.8\n", - "Initial test score: 0.83\n", - "Checkpoint path: /root/.adalflow/ckpt/ObjectCountAdalComponent\n", - "save to /root/.adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Training Step: 1: 0%| | 0/13 [00:00 0.6666666666666666\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 445.28it/s]\n", - "Evaluating step(4): 1.0 across 4 samples, Max potential: 1.0: 100%|██████████| 4/4 [00:01<00:00, 2.67it/s]\n", - "Proposing: 0%| | 0/5 [00:03= 0.75\n", - "Done with proposals\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 1139.66it/s]\n", - "Evaluating step(5): 0.84 across 50 samples, Max potential: 0.84: 100%|██████████| 50/50 [00:16<00:00, 3.04it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Optimizer step: 0.84 > 0.8\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 100/100 [00:00<00:00, 1658.72it/s]\n", - "Evaluating step(4): 0.91 across 100 samples, Max potential: 0.91: 100%|██████████| 100/100 [00:29<00:00, 3.37it/s]\n", - "Training Step: 6: 38%|███▊ | 5/13 [00:56<02:18, 17.27s/it]\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 207.97it/s]\n", - "Training: 100%|██████████| 4/4 [00:01<00:00, 3.86it/s]\n", - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 494.99it/s]\n", - "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 805.09it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Moving batch correct size: 3\n", - "Moving batch error size: 1\n", - "Moving batch acc: 0.75\n", - "Moving batch correct size: 3\n", - "Moving batch error size: 1\n", - "Subset Error size: 1\n", - "Subset Correct size: 2\n", - "Subset score: 0.6666666666666666\n", - "Subset batch acc: 0.6666666666666666\n", - "Subset loss backward...\n", - "setting pred name Generator_outputy_pred_3 score to 1.0\n", - "setting pred name Generator_outputy_pred_1 score to 0.0\n", - "setting pred name Generator_outputy_pred_0 score to 1.0\n", - "Subset loss backward time: 4.081957817077637\n", - "Optimizer propose...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Proposing: 0%| | 0/5 [00:00 
0.6666666666666666\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "\n", - "Loading Data: 100%|██████████| 8/8 [00:00<00:00, 279.47it/s]\n", - "Evaluating step(6): 0.875 across 8 samples, Max potential: 0.875: 100%|██████████| 8/8 [00:01<00:00, 4.43it/s]\n", - "Proposing: 0%| | 0/5 [00:04= 0.875\n", - "Done with proposals\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 2336.58it/s]\n", - "Evaluating step(7): 0.84 across 50 samples, Max potential: 0.84: 100%|██████████| 50/50 [00:17<00:00, 2.88it/s]\n", - "Training Step: 8: 54%|█████▍ | 7/13 [01:37<01:58, 19.81s/it]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Optimizer revert: 0.84 <= 0.84\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 148.75it/s]\n", - "Training: 100%|██████████| 4/4 [00:01<00:00, 2.04it/s]\n", - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 345.11it/s]\n", - "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 7550.50it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Moving batch correct size: 11\n", - "Moving batch error size: 1\n", - "Moving batch acc: 0.9166666666666666\n", - "Moving batch correct size: 11\n", - "Moving batch error size: 1\n", - "Subset Error size: 1\n", - "Subset Correct size: 2\n", - "Subset score: 0.6666666666666666\n", - "Subset batch acc: 0.6666666666666666\n", - "Subset loss backward...\n", - "setting pred name Generator_outputy_pred_2 score to 1.0\n", - "Subset loss backward time: 2.337067127227783\n", - "Optimizer propose...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Proposing: 0%| | 0/5 [00:00 0.6666666666666666\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "\n", - "Loading Data: 100%|██████████| 16/16 [00:00<00:00, 481.75it/s]\n", - "Evaluating step(8): 0.875 across 16 samples, Max potential: 0.875: 100%|██████████| 16/16 [00:03<00:00, 5.21it/s]\n", - "Proposing: 0%| | 0/5 [00:06= 0.875\n", - "Done with proposals\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 1112.82it/s]\n", - "Evaluating step(9): 0.86 across 50 samples, Max potential: 0.86: 100%|██████████| 50/50 [00:16<00:00, 2.97it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Optimizer step: 0.86 > 0.84\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 100/100 [00:00<00:00, 2395.58it/s]\n", - "Evaluating step(8): 0.87 across 100 samples, Max potential: 0.87: 100%|██████████| 100/100 [00:30<00:00, 3.30it/s]\n", - "Training Step: 10: 69%|██████▉ | 9/13 [02:52<02:04, 31.23s/it]\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 212.83it/s]\n", - "Training: 100%|██████████| 4/4 [00:01<00:00, 2.04it/s]\n", - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 655.18it/s]\n", - "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 1241.84it/s]\n", - "Training Step: 11: 77%|███████▋ | 10/13 [02:55<01:07, 22.43s/it]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Skipping batch 9 as acc: 1.0\n", - "No proposal can improve the subset and full set, go to next step\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - 
"Loading Data: 100%|██████████| 4/4 [00:00<00:00, 93.95it/s]\n", - "Training: 100%|██████████| 4/4 [00:01<00:00, 3.23it/s]\n", - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 757.71it/s]\n", - "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 1320.62it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Moving batch correct size: 3\n", - "Moving batch error size: 1\n", - "Moving batch acc: 0.75\n", - "Moving batch correct size: 3\n", - "Moving batch error size: 1\n", - "Subset Error size: 1\n", - "Subset Correct size: 2\n", - "Subset score: 0.6666666666666666\n", - "Subset batch acc: 0.6666666666666666\n", - "Subset loss backward...\n", - "setting pred name Generator_outputy_pred_0 score to 1.0\n", - "setting pred name Generator_outputy_pred_2 score to 0.0\n", - "setting pred name Generator_outputy_pred_3 score to 1.0\n", - "Subset loss backward time: 3.768970012664795\n", - "Optimizer propose...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Proposing: 0%| | 0/5 [00:00 0.6666666666666666\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 455.77it/s]\n", - "Evaluating step(10): 1.0 across 4 samples, Max potential: 1.0: 100%|██████████| 4/4 [00:00<00:00, 5.14it/s]\n", - "Proposing: 40%|████ | 2/5 [00:06<00:09, 3.17s/it]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Pass full check: 1.0 >= 0.75\n", - "Done with proposals\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 1732.93it/s]\n", - "Evaluating step(11): 0.825 across 40 samples, Max potential: 0.86: 80%|████████ | 40/50 [00:18<00:04, 2.21it/s]\n", - "Training Step: 12: 85%|████████▍ | 11/13 [03:24<00:49, 24.61s/it]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Optimizer revert: 0.8048780487804879 <= 0.86\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 128.86it/s]\n", - "Training: 100%|██████████| 4/4 [00:01<00:00, 2.24it/s]\n", - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 470.20it/s]\n", - "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 2608.40it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Moving batch correct size: 6\n", - "Moving batch error size: 2\n", - "Moving batch acc: 0.75\n", - "Moving batch correct size: 6\n", - "Moving batch error size: 2\n", - "Subset Error size: 2\n", - "Subset Correct size: 4\n", - "Subset score: 0.6666666666666666\n", - "Subset batch acc: 0.6666666666666666\n", - "Subset loss backward...\n", - "setting pred name Generator_outputy_pred_3 score to 1.0\n", - "setting pred name Generator_outputy_pred_2 score to 1.0\n", - "setting pred name Generator_outputy_pred_1 score to 0.0\n", - "setting pred name Generator_outputy_pred_1 score to 1.0\n", - "Subset loss backward time: 6.722561836242676\n", - "Optimizer propose...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Proposing: 0%| | 0/5 [00:00\n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. 
Carefully count each item, paying special attention to quantities mentioned. Verify your total. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - ")\n", - "Teacher generator set: Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Carefully count each item, paying special attention to quantities mentioned. Verify your total. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - "), teacher Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Carefully count each item, paying special attention to quantities mentioned. Verify your total. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - ")\n", - "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", - "Configuring teacher generator for Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - ")\n", - "Teacher generator set: Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - "), teacher Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - ")\n", - "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", - "Configuring teacher generator for Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " You are the feedback engine in an optimization system.\n", - " \n", - " Your role: Provide intelligent and creative feedback for the variable enclosed in tags, based on the objective specified in tags.\n", - " 1. Focus on the downstream OBJECTIVE without proposing new versions of the variable.\n", - " 2. Feedback examples: \"Since language models have the X failure mode...\", \"Adding X can fix this error because...\", \"Removing X can improve the objective function because...\", \"Changing X to Y would fix the mistake...\"\n", - " 3. 
Consider the variable in the context of its peers if provided.\n", - " Remember:\n", - " Be concise, critical, and direct.\n", - " \n", - " \n", - " {{conversation_sec}}\n", - " \n", - " {{objective_instruction_sec}}\n", - " , prompt_variables: ['objective_instruction_sec', 'conversation_sec']\n", - " )\n", - " (model_client): OpenAIClient()\n", - ")\n", - "Teacher generator set: Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " You are the feedback engine in an optimization system.\n", - " \n", - " Your role: Provide intelligent and creative feedback for the variable enclosed in tags, based on the objective specified in tags.\n", - " 1. Focus on the downstream OBJECTIVE without proposing new versions of the variable.\n", - " 2. Feedback examples: \"Since language models have the X failure mode...\", \"Adding X can fix this error because...\", \"Removing X can improve the objective function because...\", \"Changing X to Y would fix the mistake...\"\n", - " 3. Consider the variable in the context of its peers if provided.\n", - " Remember:\n", - " Be concise, critical, and direct.\n", - " \n", - " \n", - " {{conversation_sec}}\n", - " \n", - " {{objective_instruction_sec}}\n", - " , prompt_variables: ['objective_instruction_sec', 'conversation_sec']\n", - " )\n", - " (model_client): OpenAIClient()\n", - "), teacher Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " You are the feedback engine in an optimization system.\n", - " \n", - " Your role: Provide intelligent and creative feedback for the variable enclosed in tags, based on the objective specified in tags.\n", - " 1. Focus on the downstream OBJECTIVE without proposing new versions of the variable.\n", - " 2. Feedback examples: \"Since language models have the X failure mode...\", \"Adding X can fix this error because...\", \"Removing X can improve the objective function because...\", \"Changing X to Y would fix the mistake...\"\n", - " 3. Consider the variable in the context of its peers if provided.\n", - " Remember:\n", - " Be concise, critical, and direct.\n", - " \n", - " \n", - " {{conversation_sec}}\n", - " \n", - " {{objective_instruction_sec}}\n", - " , prompt_variables: ['objective_instruction_sec', 'conversation_sec']\n", - " )\n", - " (model_client): OpenAIClient()\n", - ")\n", - "Teacher generator configured.\n", - "save to /root/.adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n", - "Starting step: 12\n", - "trainer_results: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Training Step: 13: 0%| | 0/12 [00:00\n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': None}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - " )\n", - " )\n", - " (loss_fn): EvalFnToTextLoss()\n", - ")\n", - "Trainer(\n", - " (adaltask): ObjectCountAdalComponent(\n", - " eval_fn: compute_single_item, backward_engine: None, backward_engine_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}, teacher_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}, text_optimizer_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}\n", - " (task): ObjectCountTaskPipeline(\n", - " (llm_counter): Generator(\n", - " model_kwargs={'model': 'gpt-3.5-turbo', 'max_tokens': 2000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': None}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - " )\n", - " )\n", - " (loss_fn): EvalFnToTextLoss()\n", - " )\n", - ")\n", - "raw_shots: 0, bootstrap_shots: 1\n", - "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", - "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", - "Configuring teacher generator for Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - ")\n", - "Teacher generator set: Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - "), teacher Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - ")\n", - "Teacher generator configured.\n", - "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", - "Backward engine configured for all generators.\n", - "Restoring prompts: PromptData(id='44f6083f-4cf7-4a9a-bf10-20d218ee4106', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True)\n", - "save to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Training Step: 27: 0%| | 0/13 [00:00 0.6666666666666666\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 263.51it/s]\n", - "Evaluating step(2): 1.0 across 4 samples, Max potential: 1.0: 100%|██████████| 4/4 [00:00<00:00, 4.20it/s]\n", - "Proposing: 40%|████ | 2/5 [00:10<00:15, 5.11s/it]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Pass full check: 1.0 >= 0.75\n", - "Done with proposals\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 2287.37it/s]\n", - "Evaluating step(29): 0.8158 across 38 samples, Max potential: 0.86: 76%|███████▌ | 38/50 [00:17<00:05, 2.17it/s]\n", - "Training Step: 30: 23%|██▎ | 3/13 [00:35<02:25, 14.59s/it]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Optimizer revert: 0.7948717948717948 <= 0.86\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 268.93it/s]\n", - "Training: 100%|██████████| 4/4 [00:01<00:00, 3.69it/s]\n", - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 603.76it/s]\n", - "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 8825.47it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Moving batch correct size: 7\n", - "Moving batch error size: 1\n", - "Moving batch acc: 0.875\n", - "Moving batch correct size: 7\n", - "Moving batch error size: 1\n", - "Subset Error size: 1\n", - "Subset Correct size: 2\n", - "Subset score: 0.6666666666666666\n", - "Subset batch acc: 0.6666666666666666\n", - "Subset loss backward...\n", - "setting pred name Generator_outputy_pred_3 score to 1.0\n", - "Subset loss backward time: 2.2182435989379883\n", - "Optimizer propose...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Proposing: 0%| | 0/5 [00:00 0.6666666666666666\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "\n", - "Loading Data: 100%|██████████| 8/8 [00:00<00:00, 281.73it/s]\n", - "Evaluating step(3): 1.0 across 8 samples, Max potential: 1.0: 100%|██████████| 8/8 [00:02<00:00, 2.96it/s]\n", - "Proposing: 20%|██ | 1/5 [00:08<00:34, 8.54s/it]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Pass full check: 1.0 >= 0.875\n", - "Done with proposals\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 1910.10it/s]\n", - "Evaluating step(30): 0.72 across 25 samples, Max potential: 0.86: 50%|█████ | 25/50 [00:18<00:18, 1.38it/s]\n", - "Training Step: 31: 31%|███ | 4/13 [01:05<03:03, 20.39s/it]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Optimizer revert: 0.6923076923076923 <= 0.86\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 310.31it/s]\n", - "Training: 100%|██████████| 4/4 [00:01<00:00, 3.75it/s]\n", - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 454.32it/s]\n", - 
"Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 12336.19it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Moving batch correct size: 11\n", - "Moving batch error size: 1\n", - "Moving batch acc: 0.9166666666666666\n", - "Moving batch correct size: 11\n", - "Moving batch error size: 1\n", - "Subset Error size: 1\n", - "Subset Correct size: 2\n", - "Subset score: 0.6666666666666666\n", - "Subset batch acc: 0.6666666666666666\n", - "Subset loss backward...\n", - "setting pred name Generator_outputy_pred_0 score to 1.0\n", - "Subset loss backward time: 2.028568983078003\n", - "Optimizer propose...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Proposing: 0%| | 0/5 [00:00 0.6666666666666666\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "\n", - "Loading Data: 100%|██████████| 12/12 [00:00<00:00, 724.90it/s]\n", - "Evaluating step(4): 1.0 across 12 samples, Max potential: 1.0: 100%|██████████| 12/12 [00:03<00:00, 3.66it/s]\n", - "Proposing: 0%| | 0/5 [00:05= 0.9166666666666666\n", - "Done with proposals\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 2233.56it/s]\n", - "Evaluating step(31): 0.8511 across 47 samples, Max potential: 0.86: 94%|█████████▍| 47/50 [00:16<00:01, 2.81it/s]\n", - "Training Step: 32: 38%|███▊ | 5/13 [01:31<02:58, 22.30s/it]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Optimizer revert: 0.8333333333333334 <= 0.86\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 269.31it/s]\n", - "Training: 100%|██████████| 4/4 [00:01<00:00, 3.20it/s]\n", - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 606.49it/s]\n", - "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 1212.58it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Moving batch correct size: 15\n", - "Moving batch error size: 1\n", - "Moving batch acc: 0.9375\n", - "Moving batch correct size: 15\n", - "Moving batch error size: 1\n", - "Subset Error size: 1\n", - "Subset Correct size: 2\n", - "Subset score: 0.6666666666666666\n", - "Subset batch acc: 0.6666666666666666\n", - "Subset loss backward...\n", - "setting pred name Generator_outputy_pred_3 score to 1.0\n", - "setting pred name Generator_outputy_pred_1 score to 1.0\n", - "Subset loss backward time: 3.2150633335113525\n", - "Optimizer propose...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Proposing: 0%| | 0/5 [00:00\n", - "500 Internal Server Error\n", - "\n", - "
500 Internal Server Error\n", - "nginx
\n", - "\n", - ")\n", - "Evaluating step(5): 0.6667 across 3 samples, Max potential: 0.6667: 100%|██████████| 3/3 [00:50<00:00, 16.89s/it]\n", - "\n", - "Proposing: 20%|██ | 1/5 [00:52<03:28, 52.11s/it]\u001b[A" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Fail subset check, try next proposal: 0.6666666666666666 <= 0.6666666666666666\n", - "New prompts: [PromptData(id='a530c025-f25c-4423-b146-215ff73586f4', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. Pay special attention to quantities mentioned explicitly, including multiples. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='0b4dc918-1afb-4f03-9193-90ec51a9abab', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "\n", - "Loading Data: 100%|██████████| 3/3 [00:00<00:00, 645.05it/s]\n", - "Evaluating step(5): 0.6667 across 3 samples, Max potential: 0.6667: 100%|██████████| 3/3 [00:00<00:00, 298.94it/s]\n", - "\n", - "Proposing: 40%|████ | 2/5 [00:53<01:07, 22.46s/it]\u001b[A" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Fail subset check, try next proposal: 0.6666666666666666 <= 0.6666666666666666\n", - "New prompts: [PromptData(id='a530c025-f25c-4423-b146-215ff73586f4', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. Pay special attention to quantities mentioned explicitly, including multiples. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='0b4dc918-1afb-4f03-9193-90ec51a9abab', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "\n", - "Loading Data: 100%|██████████| 3/3 [00:00<00:00, 751.40it/s]\n", - "Evaluating step(5): 0.6667 across 3 samples, Max potential: 0.6667: 100%|██████████| 3/3 [00:00<00:00, 360.88it/s]\n", - "\n", - "Proposing: 60%|██████ | 3/5 [00:54<00:25, 12.66s/it]\u001b[A" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Fail subset check, try next proposal: 0.6666666666666666 <= 0.6666666666666666\n", - "New prompts: [PromptData(id='a530c025-f25c-4423-b146-215ff73586f4', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. Pay special attention to quantities mentioned explicitly, including multiples. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='0b4dc918-1afb-4f03-9193-90ec51a9abab', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "\n", - "Loading Data: 100%|██████████| 3/3 [00:00<00:00, 332.13it/s]\n", - "Evaluating step(5): 0.6667 across 3 samples, Max potential: 0.6667: 100%|██████████| 3/3 [00:00<00:00, 276.08it/s]\n", - "\n", - "Proposing: 80%|████████ | 4/5 [00:55<00:08, 8.12s/it]\u001b[A" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Fail subset check, try next proposal: 0.6666666666666666 <= 0.6666666666666666\n", - "New prompts: [PromptData(id='a530c025-f25c-4423-b146-215ff73586f4', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. Pay special attention to quantities mentioned explicitly, including multiples. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='0b4dc918-1afb-4f03-9193-90ec51a9abab', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "\n", - "Loading Data: 100%|██████████| 3/3 [00:00<00:00, 440.13it/s]\n", - "Evaluating step(5): 0.6667 across 3 samples, Max potential: 0.6667: 100%|██████████| 3/3 [00:00<00:00, 235.96it/s]\n", - "\n", - "Proposing: 100%|██████████| 5/5 [00:57<00:00, 11.41s/it]\n", - "Training Step: 33: 46%|████▌ | 6/13 [02:33<04:07, 35.35s/it]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Fail subset check, try next proposal: 0.6666666666666666 <= 0.6666666666666666\n", - "Done with proposals\n", - "No proposal can improve the subset and full set, go to next step\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 317.05it/s]\n", - "Training: 100%|██████████| 4/4 [00:00<00:00, 676.47it/s]\n", - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 543.36it/s]\n", - "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 1518.44it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Moving batch correct size: 18\n", - "Moving batch error size: 2\n", - "Moving batch acc: 0.9\n", - "Moving batch correct size: 18\n", - "Moving batch error size: 2\n", - "Subset Error size: 2\n", - "Subset Correct size: 4\n", - "Subset score: 0.6666666666666666\n", - "Subset batch acc: 0.6666666666666666\n", - "Subset loss backward...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "setting pred name Generator_outputy_pred_3 score to 1.0\n", - "setting pred name Generator_outputy_pred_2 score to 0.0\n", - "setting pred name Generator_outputy_pred_0 score to 1.0\n", - "setting pred name Generator_outputy_pred_1 score to 1.0\n", - "Subset loss backward time: 7.857504606246948\n", - "Optimizer propose...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Proposing: 0%| | 0/5 [00:00 0.6666666666666666\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "\n", - "Loading Data: 100%|██████████| 20/20 [00:00<00:00, 649.93it/s]\n", - 
"Evaluating step(8): 0.95 across 20 samples, Max potential: 0.95: 100%|██████████| 20/20 [00:02<00:00, 8.93it/s]\n", - "Proposing: 20%|██ | 1/5 [00:08<00:35, 8.79s/it]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Pass full check: 0.95 >= 0.9\n", - "Done with proposals\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 2667.62it/s]\n", - "Evaluating step(35): 0.8511 across 47 samples, Max potential: 0.86: 94%|█████████▍| 47/50 [00:00<00:00, 559.52it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Optimizer revert: 0.8333333333333334 <= 0.86\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Training Step: 36: 69%|██████▉ | 9/13 [03:21<01:29, 22.39s/it]\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 154.85it/s]\n", - "Training: 100%|██████████| 4/4 [00:01<00:00, 3.33it/s]\n", - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 610.06it/s]\n", - "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 1798.78it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Moving batch correct size: 22\n", - "Moving batch error size: 2\n", - "Moving batch acc: 0.95\n", - "Moving batch correct size: 19\n", - "Moving batch error size: 1\n", - "Subset Error size: 1\n", - "Subset Correct size: 2\n", - "Subset score: 0.6666666666666666\n", - "Subset batch acc: 0.6666666666666666\n", - "Subset loss backward...\n", - "setting pred name Generator_outputy_pred_2 score to 1.0\n", - "Subset loss backward time: 2.553833246231079\n", - "Optimizer propose...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Proposing: 0%| | 0/5 [00:00\n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Carefully count each item and verify your total. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - ")\n", - "Teacher generator set: Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Carefully count each item and verify your total. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - "), teacher Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Carefully count each item and verify your total. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - ")\n", - "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", - "Configuring teacher generator for Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - ")\n", - "Teacher generator set: Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - "), teacher Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - ")\n", - "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", - "Configuring teacher generator for Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " You are the feedback engine in an optimization system.\n", - " \n", - " Your role: Provide intelligent and creative feedback for the variable enclosed in tags, based on the objective specified in tags.\n", - " 1. Focus on the downstream OBJECTIVE without proposing new versions of the variable.\n", - " 2. Feedback examples: \"Since language models have the X failure mode...\", \"Adding X can fix this error because...\", \"Removing X can improve the objective function because...\", \"Changing X to Y would fix the mistake...\"\n", - " 3. Consider the variable in the context of its peers if provided.\n", - " Remember:\n", - " Be concise, critical, and direct.\n", - " \n", - " \n", - " {{conversation_sec}}\n", - " \n", - " {{objective_instruction_sec}}\n", - " , prompt_variables: ['objective_instruction_sec', 'conversation_sec']\n", - " )\n", - " (model_client): OpenAIClient()\n", - ")\n", - "Teacher generator set: Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " You are the feedback engine in an optimization system.\n", - " \n", - " Your role: Provide intelligent and creative feedback for the variable enclosed in tags, based on the objective specified in tags.\n", - " 1. Focus on the downstream OBJECTIVE without proposing new versions of the variable.\n", - " 2. Feedback examples: \"Since language models have the X failure mode...\", \"Adding X can fix this error because...\", \"Removing X can improve the objective function because...\", \"Changing X to Y would fix the mistake...\"\n", - " 3. 
Consider the variable in the context of its peers if provided.\n", - " Remember:\n", - " Be concise, critical, and direct.\n", - " \n", - " \n", - " {{conversation_sec}}\n", - " \n", - " {{objective_instruction_sec}}\n", - " , prompt_variables: ['objective_instruction_sec', 'conversation_sec']\n", - " )\n", - " (model_client): OpenAIClient()\n", - "), teacher Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " You are the feedback engine in an optimization system.\n", - " \n", - " Your role: Provide intelligent and creative feedback for the variable enclosed in tags, based on the objective specified in tags.\n", - " 1. Focus on the downstream OBJECTIVE without proposing new versions of the variable.\n", - " 2. Feedback examples: \"Since language models have the X failure mode...\", \"Adding X can fix this error because...\", \"Removing X can improve the objective function because...\", \"Changing X to Y would fix the mistake...\"\n", - " 3. Consider the variable in the context of its peers if provided.\n", - " Remember:\n", - " Be concise, critical, and direct.\n", - " \n", - " \n", - " {{conversation_sec}}\n", - " \n", - " {{objective_instruction_sec}}\n", - " , prompt_variables: ['objective_instruction_sec', 'conversation_sec']\n", - " )\n", - " (model_client): OpenAIClient()\n", - ")\n", - "Teacher generator configured.\n", - "save to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n", - "Starting step: 38\n", - "trainer_results: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Training Step: 39: 0%| | 0/12 [00:00 0.6666666666666666\n" + ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "78JAv4ULEn07", - "outputId": "e87bb360-fc26-4dbd-d163-86ab32c292df" - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:adalflow.core.generator:Error copying the prompt_kwargs: 'prompt' is not a valid ParameterType\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-3.5-turbo.db\n", - "ObjectCountAdalComponent(\n", - " eval_fn: compute_single_item, backward_engine: None, backward_engine_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}, teacher_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}, text_optimizer_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}\n", - " (task): ObjectCountTaskPipeline(\n", - " (llm_counter): Generator(\n", - " model_kwargs={'model': 'gpt-3.5-turbo', 'max_tokens': 2000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# 
Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': None}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - " )\n", - " )\n", - " (loss_fn): EvalFnToTextLoss()\n", - ")\n", - "Trainer(\n", - " (adaltask): ObjectCountAdalComponent(\n", - " eval_fn: compute_single_item, backward_engine: None, backward_engine_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}, teacher_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}, text_optimizer_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}\n", - " (task): ObjectCountTaskPipeline(\n", - " (llm_counter): Generator(\n", - " model_kwargs={'model': 'gpt-3.5-turbo', 'max_tokens': 2000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': None}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - " )\n", - " )\n", - " (loss_fn): EvalFnToTextLoss()\n", - " )\n", - ")\n", - "raw_shots: 0, bootstrap_shots: 1\n", - "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", - "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", - "Configuring teacher generator for Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - ")\n", - "Teacher generator set: Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - "), teacher Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - ")\n", - "Teacher generator configured.\n", - "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", - "Backward engine configured for all generators.\n", - "Restoring prompts: PromptData(id='44f6083f-4cf7-4a9a-bf10-20d218ee4106', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True)\n", - "save to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Training Step: 51: 0%| | 0/13 [00:00 0.86\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 100/100 [00:00<00:00, 5848.08it/s]\n", - "Evaluating step(53): 0.9 across 100 samples, Max potential: 0.9: 100%|██████████| 100/100 [00:30<00:00, 3.32it/s]\n", - "Training Step: 54: 23%|██▎ | 3/13 [01:28<05:35, 33.51s/it]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Saving checkpoint to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 297.78it/s]\n", - "Training: 100%|██████████| 4/4 [00:01<00:00, 2.95it/s]\n", - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 407.40it/s]\n", - "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 8952.62it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Loss backward...\n", - "setting pred name Generator_outputy_pred_1 score to 1.0\n", - "setting pred name Generator_outputy_pred_0 score to 1.0\n", - "setting pred name Generator_outputy_pred_2 score to 1.0\n", - "setting pred name Generator_outputy_pred_3 score to 1.0\n", - "Optimizer propose...\n", - "New prompts: [PromptData(id='327b63f0-b532-435a-85d7-6137d4e52c4c', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. List each item individually and ensure accuracy. Show your calculations step by step. 
The last line of your response should be: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='73a3953b-6351-44d8-a36f-7521db346cca', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 1274.72it/s]\n", - "Evaluating step(54): 0.94 across 50 samples, Max potential: 0.94: 100%|██████████| 50/50 [00:16<00:00, 3.06it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Optimizer step: 0.94 > 0.88\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 100/100 [00:00<00:00, 6831.78it/s]\n", - "Evaluating step(54): 0.91 across 100 samples, Max potential: 0.91: 100%|██████████| 100/100 [00:30<00:00, 3.33it/s]\n", - "Training Step: 55: 31%|███ | 4/13 [02:21<06:10, 41.21s/it]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Saving checkpoint to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 152.84it/s]\n", - "Training: 100%|██████████| 4/4 [00:03<00:00, 1.28it/s]\n", - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 688.86it/s]\n", - "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 1318.45it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Loss backward...\n", - "setting pred name Generator_outputy_pred_0 score to 1.0\n", - "setting pred name Generator_outputy_pred_1 score to 0.0\n", - "setting pred name Generator_outputy_pred_2 score to 1.0\n", - "setting pred name Generator_outputy_pred_3 score to 1.0\n", - "Optimizer propose...\n", - "New prompts: [PromptData(id='327b63f0-b532-435a-85d7-6137d4e52c4c', name='llm_counter.system_prompt', data='You will answer a reasoning question. Carefully count each item and verify your total. List each item individually, ensuring each is counted as \"1\" regardless of quantity mentioned. Show your calculations step by step. 
The last line of your response should be: \\'Answer: $VALUE\\' where VALUE is a numerical value.', requires_opt=True), PromptData(id='73a3953b-6351-44d8-a36f-7521db346cca', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 2011.16it/s]\n", - "Evaluating step(55): 0.8696 across 23 samples, Max potential: 0.94: 46%|████▌ | 23/50 [00:15<00:17, 1.52it/s]\n", - "Training Step: 56: 38%|███▊ | 5/13 [02:46<04:43, 35.43s/it]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Optimizer revert: 0.8333333333333334 <= 0.94\n", - "Saving checkpoint to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 93.66it/s]\n", - "Training: 100%|██████████| 4/4 [00:02<00:00, 1.75it/s]\n", - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 646.55it/s]\n", - "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 2217.45it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Loss backward...\n", - "setting pred name Generator_outputy_pred_3 score to 1.0\n", - "setting pred name Generator_outputy_pred_1 score to 1.0\n", - "setting pred name Generator_outputy_pred_0 score to 1.0\n", - "setting pred name Generator_outputy_pred_2 score to 1.0\n", - "Optimizer propose...\n", - "New prompts: [PromptData(id='327b63f0-b532-435a-85d7-6137d4e52c4c', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. List each item individually and ensure accuracy. Show your calculations step by step. The last line of your response should be: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='73a3953b-6351-44d8-a36f-7521db346cca', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 4572.35it/s]\n", - "Evaluating step(56): 0.94 across 50 samples, Max potential: 0.94: 100%|██████████| 50/50 [00:00<00:00, 390.77it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Optimizer revert: 0.94 <= 0.94\n", - "Saving checkpoint to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Training Step: 57: 46%|████▌ | 6/13 [02:54<03:02, 26.03s/it]\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 145.48it/s]\n", - "Training: 100%|██████████| 4/4 [00:02<00:00, 1.52it/s]\n", - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 375.76it/s]\n", - "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 1437.76it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Loss backward...\n", - "setting pred name Generator_outputy_pred_2 score to 1.0\n", - "setting pred name Generator_outputy_pred_0 score to 1.0\n", - "setting pred name Generator_outputy_pred_1 score to 0.0\n", - "setting pred name Generator_outputy_pred_3 score to 1.0\n", - "Optimizer propose...\n", - "New prompts: [PromptData(id='327b63f0-b532-435a-85d7-6137d4e52c4c', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. 
Carefully count each item and verify your total. List each item individually and ensure accuracy. Show your calculations step by step. Double-check for any grouped items and count them correctly. The last line of your response should be: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='73a3953b-6351-44d8-a36f-7521db346cca', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 1614.47it/s]\n", - "Evaluating step(57): 0.7857 across 14 samples, Max potential: 0.94: 28%|██▊ | 14/50 [00:19<00:50, 1.41s/it]\n", - "Training Step: 58: 54%|█████▍ | 7/13 [03:23<02:42, 27.04s/it]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Optimizer revert: 0.7333333333333333 <= 0.94\n", - "Saving checkpoint to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 137.96it/s]\n", - "Training: 100%|██████████| 4/4 [00:01<00:00, 2.94it/s]\n", - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 806.79it/s]\n", - "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 11522.81it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Loss backward...\n", - "setting pred name Generator_outputy_pred_2 score to 1.0\n", - "setting pred name Generator_outputy_pred_1 score to 1.0\n", - "setting pred name Generator_outputy_pred_3 score to 1.0\n", - "setting pred name Generator_outputy_pred_0 score to 1.0\n", - "Optimizer propose...\n", - "New prompts: [PromptData(id='327b63f0-b532-435a-85d7-6137d4e52c4c', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. List each item individually and ensure accuracy. Show your calculations step by step. 
The last line of your response should be formatted as: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='73a3953b-6351-44d8-a36f-7521db346cca', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 3560.17it/s]\n", - "Evaluating step(58): 0.88 across 25 samples, Max potential: 0.94: 50%|█████ | 25/50 [00:17<00:17, 1.45it/s]\n", - "Training Step: 59: 62%|██████▏ | 8/13 [03:47<02:10, 26.06s/it]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Optimizer revert: 0.8461538461538461 <= 0.94\n", - "Saving checkpoint to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 93.90it/s]\n", - "Training: 100%|██████████| 4/4 [00:01<00:00, 2.70it/s]\n", - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 552.01it/s]\n", - "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 5648.89it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Loss backward...\n", - "setting pred name Generator_outputy_pred_1 score to 1.0\n", - "setting pred name Generator_outputy_pred_3 score to 1.0\n", - "setting pred name Generator_outputy_pred_0 score to 1.0\n", - "setting pred name Generator_outputy_pred_2 score to 0.0\n", - "Optimizer propose...\n", - "New prompts: [PromptData(id='327b63f0-b532-435a-85d7-6137d4e52c4c', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. List each item individually and ensure accuracy. Show your calculations step by step. Double-check your final count. 
The last line of your response should be: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='73a3953b-6351-44d8-a36f-7521db346cca', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 1770.11it/s]\n", - "Evaluating step(59): 0.9286 across 42 samples, Max potential: 0.94: 84%|████████▍ | 42/50 [00:16<00:03, 2.49it/s]\n", - "Training Step: 60: 69%|██████▉ | 9/13 [04:13<01:43, 26.00s/it]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Optimizer revert: 0.9069767441860465 <= 0.94\n", - "Saving checkpoint to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 314.86it/s]\n", - "Training: 100%|██████████| 4/4 [00:01<00:00, 3.10it/s]\n", - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 722.53it/s]\n", - "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 7940.00it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Loss backward...\n", - "setting pred name Generator_outputy_pred_0 score to 1.0\n", - "setting pred name Generator_outputy_pred_1 score to 1.0\n", - "setting pred name Generator_outputy_pred_3 score to 1.0\n", - "setting pred name Generator_outputy_pred_2 score to 1.0\n", - "Optimizer propose...\n", - "New prompts: [PromptData(id='327b63f0-b532-435a-85d7-6137d4e52c4c', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. List each item individually and ensure accuracy. Show your calculations step by step. Double-check your final count for precision. 
The last line of your response should be: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='73a3953b-6351-44d8-a36f-7521db346cca', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 7188.43it/s]\n", - "Evaluating step(60): 0.8966 across 29 samples, Max potential: 0.94: 58%|█████▊ | 29/50 [00:15<00:11, 1.84it/s]\n", - "Training Step: 61: 77%|███████▋ | 10/13 [04:35<01:14, 24.87s/it]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Optimizer revert: 0.8666666666666667 <= 0.94\n", - "Saving checkpoint to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 95.68it/s]\n", - "Training: 100%|██████████| 4/4 [00:01<00:00, 2.74it/s]\n", - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 587.05it/s]\n", - "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 12520.31it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Loss backward...\n", - "setting pred name Generator_outputy_pred_1 score to 1.0\n", - "setting pred name Generator_outputy_pred_0 score to 1.0\n", - "setting pred name Generator_outputy_pred_3 score to 1.0\n", - "setting pred name Generator_outputy_pred_2 score to 1.0\n", - "Optimizer propose...\n", - "New prompts: [PromptData(id='327b63f0-b532-435a-85d7-6137d4e52c4c', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. List each item individually and ensure accuracy. Show your calculations step by step. Double-check your final count. 
The last line of your response should be: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='73a3953b-6351-44d8-a36f-7521db346cca', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 3036.62it/s]\n", - "Evaluating step(61): 0.9286 across 42 samples, Max potential: 0.94: 84%|████████▍ | 42/50 [00:00<00:00, 327.89it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Optimizer revert: 0.9069767441860465 <= 0.94\n", - "Saving checkpoint to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Training Step: 62: 85%|████████▍ | 11/13 [04:44<00:40, 20.14s/it]\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 136.40it/s]\n", - "Training: 100%|██████████| 4/4 [00:01<00:00, 3.17it/s]\n", - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 417.11it/s]\n", - "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 14339.50it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Loss backward...\n", - "setting pred name Generator_outputy_pred_2 score to 1.0\n", - "setting pred name Generator_outputy_pred_0 score to 1.0\n", - "setting pred name Generator_outputy_pred_3 score to 1.0\n", - "setting pred name Generator_outputy_pred_1 score to 1.0\n", - "Optimizer propose...\n", - "New prompts: [PromptData(id='327b63f0-b532-435a-85d7-6137d4e52c4c', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. List each item individually and ensure accuracy. Show your calculations step by step. Double-check your final count. The last line of your response should be: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='73a3953b-6351-44d8-a36f-7521db346cca', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 5035.06it/s]\n", - "Evaluating step(62): 0.9286 across 42 samples, Max potential: 0.94: 84%|████████▍ | 42/50 [00:00<00:00, 327.19it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Optimizer revert: 0.9069767441860465 <= 0.94\n", - "Saving checkpoint to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Training Step: 62: 92%|█████████▏| 12/13 [04:51<00:24, 24.28s/it]\n", - "Epoch: 0%| | 0/1 [04:51\n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Carefully count each item and verify your total. List each item individually and ensure accuracy. Show your calculations step by step. 
The last line of your response should be: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - ")\n", - "Teacher generator set: Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Carefully count each item and verify your total. List each item individually and ensure accuracy. Show your calculations step by step. The last line of your response should be: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - "), teacher Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Carefully count each item and verify your total. List each item individually and ensure accuracy. Show your calculations step by step. The last line of your response should be: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - ")\n", - "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", - "Configuring teacher generator for Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - ")\n", - "Teacher generator set: Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - "), teacher Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " {{system_prompt}}\n", - " {# Few shot demos #}\n", - " {% if few_shot_demos is not none %}\n", - " Here are some examples:\n", - " {{few_shot_demos}}\n", - " {% endif %}\n", - " \n", - " \n", - " {{input_str}}\n", - " \n", - " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", - " )\n", - " (model_client): OpenAIClient()\n", - " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", - ")\n", - "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", - "Configuring teacher generator for Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " You are the feedback engine in an optimization system.\n", - " \n", - " Your role: Provide intelligent and creative feedback for the variable enclosed in tags, based on the objective specified in tags.\n", - " 1. Focus on the downstream OBJECTIVE without proposing new versions of the variable.\n", - " 2. Feedback examples: \"Since language models have the X failure mode...\", \"Adding X can fix this error because...\", \"Removing X can improve the objective function because...\", \"Changing X to Y would fix the mistake...\"\n", - " 3. 
Consider the variable in the context of its peers if provided.\n", - " Remember:\n", - " Be concise, critical, and direct.\n", - " \n", - " \n", - " {{conversation_sec}}\n", - " \n", - " {{objective_instruction_sec}}\n", - " , prompt_variables: ['objective_instruction_sec', 'conversation_sec']\n", - " )\n", - " (model_client): OpenAIClient()\n", - ")\n", - "Teacher generator set: Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " You are the feedback engine in an optimization system.\n", - " \n", - " Your role: Provide intelligent and creative feedback for the variable enclosed in tags, based on the objective specified in tags.\n", - " 1. Focus on the downstream OBJECTIVE without proposing new versions of the variable.\n", - " 2. Feedback examples: \"Since language models have the X failure mode...\", \"Adding X can fix this error because...\", \"Removing X can improve the objective function because...\", \"Changing X to Y would fix the mistake...\"\n", - " 3. Consider the variable in the context of its peers if provided.\n", - " Remember:\n", - " Be concise, critical, and direct.\n", - " \n", - " \n", - " {{conversation_sec}}\n", - " \n", - " {{objective_instruction_sec}}\n", - " , prompt_variables: ['objective_instruction_sec', 'conversation_sec']\n", - " )\n", - " (model_client): OpenAIClient()\n", - "), teacher Generator(\n", - " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", - " (prompt): Prompt(\n", - " template: \n", - " You are the feedback engine in an optimization system.\n", - " \n", - " Your role: Provide intelligent and creative feedback for the variable enclosed in tags, based on the objective specified in tags.\n", - " 1. Focus on the downstream OBJECTIVE without proposing new versions of the variable.\n", - " 2. Feedback examples: \"Since language models have the X failure mode...\", \"Adding X can fix this error because...\", \"Removing X can improve the objective function because...\", \"Changing X to Y would fix the mistake...\"\n", - " 3. 
Consider the variable in the context of its peers if provided.\n", - " Remember:\n", - " Be concise, critical, and direct.\n", - " \n", - " \n", - " {{conversation_sec}}\n", - " \n", - " {{objective_instruction_sec}}\n", - " , prompt_variables: ['objective_instruction_sec', 'conversation_sec']\n", - " )\n", - " (model_client): OpenAIClient()\n", - ")\n", - "Teacher generator configured.\n", - "save to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n", - "Starting step: 62\n", - "trainer_results: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Training Step: 63: 0%| | 0/12 [00:00 0.94\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 100/100 [00:00<00:00, 3294.35it/s]\n", - "Evaluating step(65): 0.95 across 100 samples, Max potential: 0.95: 100%|██████████| 100/100 [00:39<00:00, 2.51it/s]\n", - "Training Step: 66: 42%|████▏ | 5/12 [01:50<02:42, 23.20s/it]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Training Step: 66\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 186.04it/s]\n", - "Training: 25%|██▌ | 1/4 [00:00<00:02, 1.01it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Trace with id fe9b883c-4f47-44f7-a388-b03a2fb10413 already exists. Updating the trace.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\rTraining: 50%|█████ | 2/4 [00:01<00:01, 1.30it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Trace with id 12a6ff3d-f54d-4d89-b5f0-1aec30e96398 already exists. Updating the trace.\n", - "Trace with id 840d9ed5-8222-45a9-a406-7445feae9733 already exists. Updating the trace.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Training: 100%|██████████| 4/4 [00:02<00:00, 1.46it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Trace with id 3a9a47c8-a210-43a4-8d24-b9159babb6e4 already exists. Updating the trace.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 636.54it/s]\n", - "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 9420.11it/s]\n", - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 111.34it/s]\n", - "Training: 100%|██████████| 4/4 [00:01<00:00, 2.50it/s]\n", - "\n", - "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 321.28it/s]\n", - "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 731.61it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "sampled_augmented_demos: ['99607986-e107-46b8-b86b-177b295983c4']\n", - "New prompts: [PromptData(id='327b63f0-b532-435a-85d7-6137d4e52c4c', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. List each item individually and ensure accuracy. Show your calculations step by step. 
The last line of your response should be: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='73a3953b-6351-44d8-a36f-7521db346cca', name='llm_counter.few_shot_demos', data=\"input_str: I have a yam, a cauliflower, a bed, two cabbages, a garlic, an oven, a\\n carrot, a head of broccoli, a potato, a stalk of celery, a lettuce head, and a toaster.\\n How many vegetables do I have?\\nExample: 'Let''s list and count each vegetable individually:\\n\\n\\n 1. Yam\\n\\n 2. Cauliflower\\n\\n 3. Cabbage (1)\\n\\n 4. Cabbage (2)\\n\\n 5. Garlic\\n\\n 6. Carrot\\n\\n 7. Broccoli\\n\\n 8. Potato\\n\\n 9. Celery\\n\\n 10. Lettuce\\n\\n\\n Now, let''s verify the count:\\n\\n\\n 1. Yam\\n\\n 2. Cauliflower\\n\\n 3. Cabbage (1)\\n\\n 4. Cabbage (2)\\n\\n 5. Garlic\\n\\n 6. Carrot\\n\\n 7. Broccoli\\n\\n 8. Potato\\n\\n 9. Celery\\n\\n 10. Lettuce\\n\\n\\n Total number of vegetables: 10\\n\\n\\n Answer: 10'\", requires_opt=True)]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 1120.89it/s]\n", - "Evaluating step(66): 0.96 across 50 samples, Max potential: 0.96: 100%|██████████| 50/50 [00:00<00:00, 212.00it/s]\n", - "Training Step: 67: 58%|█████▊ | 7/12 [01:55<01:32, 18.51s/it]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Fail validation: 0.96 <= 0.96, revert\n", - "Training Step: 67\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Loading Data: 0%| | 0/4 [00:00= 0.75\n", + "Done with proposals\n" + ] }, { - "cell_type": "markdown", - "metadata": { - "id": "Fr0V3XNCHAis" - }, - "source": [ - "Happy Optimizing!!!" - ] + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 1139.66it/s]\n", + "Evaluating step(5): 0.84 across 50 samples, Max potential: 0.84: 100%|██████████| 50/50 [00:16<00:00, 3.04it/s]\n" + ] }, { - "cell_type": "markdown", - "metadata": { - "id": "3Wnvqs3RyI_z" - }, - "source": [ - "# Issues and feedback\n", - "\n", - "If you encounter any issues, please report them here: [GitHub Issues](https://github.com/SylphAI-Inc/LightRAG/issues).\n", - "\n", - "For feedback, you can use either the [GitHub discussions](https://github.com/SylphAI-Inc/LightRAG/discussions) or [Discord](https://discord.gg/ezzszrRZvT)." 
- ] - } - ], - "metadata": { - "colab": { - "provenance": [] + "name": "stdout", + "output_type": "stream", + "text": [ + "Optimizer step: 0.84 > 0.8\n" + ] }, - "kernelspec": { - "display_name": "Python 3", - "name": "python3" + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 100/100 [00:00<00:00, 1658.72it/s]\n", + "Evaluating step(4): 0.91 across 100 samples, Max potential: 0.91: 100%|██████████| 100/100 [00:29<00:00, 3.37it/s]\n", + "Training Step: 6: 38%|███▊ | 5/13 [00:56<02:18, 17.27s/it]\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 207.97it/s]\n", + "Training: 100%|██████████| 4/4 [00:01<00:00, 3.86it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 494.99it/s]\n", + "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 805.09it/s]\n" + ] }, - "language_info": { - "name": "python" - } + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Moving batch correct size: 3\n", + "Moving batch error size: 1\n", + "Moving batch acc: 0.75\n", + "Moving batch correct size: 3\n", + "Moving batch error size: 1\n", + "Subset Error size: 1\n", + "Subset Correct size: 2\n", + "Subset score: 0.6666666666666666\n", + "Subset batch acc: 0.6666666666666666\n", + "Subset loss backward...\n", + "setting pred name Generator_outputy_pred_3 score to 1.0\n", + "setting pred name Generator_outputy_pred_1 score to 0.0\n", + "setting pred name Generator_outputy_pred_0 score to 1.0\n", + "Subset loss backward time: 4.081957817077637\n", + "Optimizer propose...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Proposing: 0%| | 0/5 [00:00 0.6666666666666666\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "\n", + "Loading Data: 100%|██████████| 8/8 [00:00<00:00, 279.47it/s]\n", + "Evaluating step(6): 0.875 across 8 samples, Max potential: 0.875: 100%|██████████| 8/8 [00:01<00:00, 4.43it/s]\n", + "Proposing: 0%| | 0/5 [00:04= 0.875\n", + "Done with proposals\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 2336.58it/s]\n", + "Evaluating step(7): 0.84 across 50 samples, Max potential: 0.84: 100%|██████████| 50/50 [00:17<00:00, 2.88it/s]\n", + "Training Step: 8: 54%|█████▍ | 7/13 [01:37<01:58, 19.81s/it]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Optimizer revert: 0.84 <= 0.84\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 148.75it/s]\n", + "Training: 100%|██████████| 4/4 [00:01<00:00, 2.04it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 345.11it/s]\n", + "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 7550.50it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Moving batch correct size: 11\n", + "Moving batch error size: 1\n", + "Moving batch acc: 0.9166666666666666\n", + "Moving batch correct size: 11\n", + "Moving batch error size: 1\n", + "Subset Error size: 1\n", + "Subset Correct size: 2\n", + "Subset score: 0.6666666666666666\n", + "Subset batch acc: 0.6666666666666666\n", + "Subset loss backward...\n", + "setting pred name Generator_outputy_pred_2 score to 1.0\n", + "Subset loss backward time: 2.337067127227783\n", + "Optimizer propose...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Proposing: 0%| | 0/5 [00:00 0.6666666666666666\n" + ] + }, 
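
The repeated `Optimizer propose...` / `Pass subset check` / `Pass full check` / `Optimizer revert` lines in the outputs above come from the trainer's constrained proposal loop: each candidate prompt must first beat the current score on a small subset built around recent failures, and then beat the running validation score, otherwise it is reverted. A minimal sketch of that gating logic, assuming hypothetical names (`propose`, `revert`, `accept`, and `evaluate` are illustrative stand-ins, not AdalFlow's actual internals):

```python
from typing import Callable, List

def constrained_step(
    propose: Callable[[], None],        # draft a new candidate prompt (text-gradient proposal)
    revert: Callable[[], None],         # discard the candidate, restore the previous prompt
    accept: Callable[[], None],         # keep the candidate as the new baseline
    evaluate: Callable[[List], float],  # score the current prompt on a dataset split
    error_subset: List,                 # small subset sampled around recent failures
    val_set: List,                      # full validation split
    subset_baseline: float,
    val_baseline: float,
    max_proposals: int = 5,             # matches the "Proposing: 0/5" bars in the logs
) -> bool:
    """Accept a proposal only if it beats both the failing subset and the validation baseline."""
    for _ in range(max_proposals):
        propose()
        if evaluate(error_subset) <= subset_baseline:  # fails the subset check -> revert
            revert()
            continue
        if evaluate(val_set) <= val_baseline:          # fails the full check -> revert
            revert()
            continue
        accept()                                       # logged as "Optimizer step: x > y"
        return True
    return False  # logged as "No proposal can improve the subset and full set, go to next step"
```

This is why most steps in the log end with `Optimizer revert: x <= y` and only a few with `Optimizer step: x > y`: a candidate prompt is kept only when it strictly improves on both checks, which is also why the running best score (`Max potential`) never decreases across steps.
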
+ { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "\n", + "Loading Data: 100%|██████████| 16/16 [00:00<00:00, 481.75it/s]\n", + "Evaluating step(8): 0.875 across 16 samples, Max potential: 0.875: 100%|██████████| 16/16 [00:03<00:00, 5.21it/s]\n", + "Proposing: 0%| | 0/5 [00:06= 0.875\n", + "Done with proposals\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 1112.82it/s]\n", + "Evaluating step(9): 0.86 across 50 samples, Max potential: 0.86: 100%|██████████| 50/50 [00:16<00:00, 2.97it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Optimizer step: 0.86 > 0.84\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 100/100 [00:00<00:00, 2395.58it/s]\n", + "Evaluating step(8): 0.87 across 100 samples, Max potential: 0.87: 100%|██████████| 100/100 [00:30<00:00, 3.30it/s]\n", + "Training Step: 10: 69%|██████▉ | 9/13 [02:52<02:04, 31.23s/it]\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 212.83it/s]\n", + "Training: 100%|██████████| 4/4 [00:01<00:00, 2.04it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 655.18it/s]\n", + "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 1241.84it/s]\n", + "Training Step: 11: 77%|███████▋ | 10/13 [02:55<01:07, 22.43s/it]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Skipping batch 9 as acc: 1.0\n", + "No proposal can improve the subset and full set, go to next step\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 93.95it/s]\n", + "Training: 100%|██████████| 4/4 [00:01<00:00, 3.23it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 757.71it/s]\n", + "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 1320.62it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Moving batch correct size: 3\n", + "Moving batch error size: 1\n", + "Moving batch acc: 0.75\n", + "Moving batch correct size: 3\n", + "Moving batch error size: 1\n", + "Subset Error size: 1\n", + "Subset Correct size: 2\n", + "Subset score: 0.6666666666666666\n", + "Subset batch acc: 0.6666666666666666\n", + "Subset loss backward...\n", + "setting pred name Generator_outputy_pred_0 score to 1.0\n", + "setting pred name Generator_outputy_pred_2 score to 0.0\n", + "setting pred name Generator_outputy_pred_3 score to 1.0\n", + "Subset loss backward time: 3.768970012664795\n", + "Optimizer propose...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Proposing: 0%| | 0/5 [00:00 0.6666666666666666\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 455.77it/s]\n", + "Evaluating step(10): 1.0 across 4 samples, Max potential: 1.0: 100%|██████████| 4/4 [00:00<00:00, 5.14it/s]\n", + "Proposing: 40%|████ | 2/5 [00:06<00:09, 3.17s/it]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pass full check: 1.0 >= 0.75\n", + "Done with proposals\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 1732.93it/s]\n", + "Evaluating step(11): 0.825 across 40 samples, Max potential: 0.86: 80%|████████ | 40/50 [00:18<00:04, 2.21it/s]\n", + "Training Step: 12: 85%|████████▍ | 11/13 [03:24<00:49, 
24.61s/it]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Optimizer revert: 0.8048780487804879 <= 0.86\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 128.86it/s]\n", + "Training: 100%|██████████| 4/4 [00:01<00:00, 2.24it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 470.20it/s]\n", + "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 2608.40it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Moving batch correct size: 6\n", + "Moving batch error size: 2\n", + "Moving batch acc: 0.75\n", + "Moving batch correct size: 6\n", + "Moving batch error size: 2\n", + "Subset Error size: 2\n", + "Subset Correct size: 4\n", + "Subset score: 0.6666666666666666\n", + "Subset batch acc: 0.6666666666666666\n", + "Subset loss backward...\n", + "setting pred name Generator_outputy_pred_3 score to 1.0\n", + "setting pred name Generator_outputy_pred_2 score to 1.0\n", + "setting pred name Generator_outputy_pred_1 score to 0.0\n", + "setting pred name Generator_outputy_pred_1 score to 1.0\n", + "Subset loss backward time: 6.722561836242676\n", + "Optimizer propose...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Proposing: 0%| | 0/5 [00:00\n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Carefully count each item, paying special attention to quantities mentioned. Verify your total. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + ")\n", + "Teacher generator set: Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Carefully count each item, paying special attention to quantities mentioned. Verify your total. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + "), teacher Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Carefully count each item, paying special attention to quantities mentioned. Verify your total. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + ")\n", + "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", + "Configuring teacher generator for Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + ")\n", + "Teacher generator set: Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + "), teacher Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + ")\n", + "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", + "Configuring teacher generator for Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " You are the feedback engine in an optimization system.\n", + " \n", + " Your role: Provide intelligent and creative feedback for the variable enclosed in tags, based on the objective specified in tags.\n", + " 1. Focus on the downstream OBJECTIVE without proposing new versions of the variable.\n", + " 2. Feedback examples: \"Since language models have the X failure mode...\", \"Adding X can fix this error because...\", \"Removing X can improve the objective function because...\", \"Changing X to Y would fix the mistake...\"\n", + " 3. Consider the variable in the context of its peers if provided.\n", + " Remember:\n", + " Be concise, critical, and direct.\n", + " \n", + " \n", + " {{conversation_sec}}\n", + " \n", + " {{objective_instruction_sec}}\n", + " , prompt_variables: ['objective_instruction_sec', 'conversation_sec']\n", + " )\n", + " (model_client): OpenAIClient()\n", + ")\n", + "Teacher generator set: Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " You are the feedback engine in an optimization system.\n", + " \n", + " Your role: Provide intelligent and creative feedback for the variable enclosed in tags, based on the objective specified in tags.\n", + " 1. Focus on the downstream OBJECTIVE without proposing new versions of the variable.\n", + " 2. Feedback examples: \"Since language models have the X failure mode...\", \"Adding X can fix this error because...\", \"Removing X can improve the objective function because...\", \"Changing X to Y would fix the mistake...\"\n", + " 3. 
Consider the variable in the context of its peers if provided.\n", + " Remember:\n", + " Be concise, critical, and direct.\n", + " \n", + " \n", + " {{conversation_sec}}\n", + " \n", + " {{objective_instruction_sec}}\n", + " , prompt_variables: ['objective_instruction_sec', 'conversation_sec']\n", + " )\n", + " (model_client): OpenAIClient()\n", + "), teacher Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " You are the feedback engine in an optimization system.\n", + " \n", + " Your role: Provide intelligent and creative feedback for the variable enclosed in tags, based on the objective specified in tags.\n", + " 1. Focus on the downstream OBJECTIVE without proposing new versions of the variable.\n", + " 2. Feedback examples: \"Since language models have the X failure mode...\", \"Adding X can fix this error because...\", \"Removing X can improve the objective function because...\", \"Changing X to Y would fix the mistake...\"\n", + " 3. Consider the variable in the context of its peers if provided.\n", + " Remember:\n", + " Be concise, critical, and direct.\n", + " \n", + " \n", + " {{conversation_sec}}\n", + " \n", + " {{objective_instruction_sec}}\n", + " , prompt_variables: ['objective_instruction_sec', 'conversation_sec']\n", + " )\n", + " (model_client): OpenAIClient()\n", + ")\n", + "Teacher generator configured.\n", + "save to /root/.adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n", + "Starting step: 12\n", + "trainer_results: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Step: 13: 0%| | 0/12 [00:00\n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': None}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + " )\n", + " )\n", + " (loss_fn): EvalFnToTextLoss()\n", + ")\n", + "Trainer(\n", + " (adaltask): ObjectCountAdalComponent(\n", + " eval_fn: compute_single_item, backward_engine: None, backward_engine_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}, teacher_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}, text_optimizer_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}\n", + " (task): ObjectCountTaskPipeline(\n", + " (llm_counter): Generator(\n", + " model_kwargs={'model': 'gpt-3.5-turbo', 'max_tokens': 2000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': None}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + " )\n", + " )\n", + " (loss_fn): EvalFnToTextLoss()\n", + " )\n", + ")\n", + "raw_shots: 0, bootstrap_shots: 1\n", + "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", + "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", + "Configuring teacher generator for Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + ")\n", + "Teacher generator set: Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + "), teacher Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + ")\n", + "Teacher generator configured.\n", + "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", + "Backward engine configured for all generators.\n", + "Restoring prompts: PromptData(id='44f6083f-4cf7-4a9a-bf10-20d218ee4106', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True)\n", + "save to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Step: 27: 0%| | 0/13 [00:00 0.6666666666666666\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 263.51it/s]\n", + "Evaluating step(2): 1.0 across 4 samples, Max potential: 1.0: 100%|██████████| 4/4 [00:00<00:00, 4.20it/s]\n", + "Proposing: 40%|████ | 2/5 [00:10<00:15, 5.11s/it]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pass full check: 1.0 >= 0.75\n", + "Done with proposals\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 2287.37it/s]\n", + "Evaluating step(29): 0.8158 across 38 samples, Max potential: 0.86: 76%|███████▌ | 38/50 [00:17<00:05, 2.17it/s]\n", + "Training Step: 30: 23%|██▎ | 3/13 [00:35<02:25, 14.59s/it]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Optimizer revert: 0.7948717948717948 <= 0.86\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 268.93it/s]\n", + "Training: 100%|██████████| 4/4 [00:01<00:00, 3.69it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 603.76it/s]\n", + "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 8825.47it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Moving batch correct size: 7\n", + "Moving batch error size: 1\n", + "Moving batch acc: 0.875\n", + "Moving batch correct size: 7\n", + "Moving batch error size: 1\n", + "Subset Error size: 1\n", + "Subset Correct size: 2\n", + "Subset score: 0.6666666666666666\n", + "Subset batch acc: 0.6666666666666666\n", + "Subset loss backward...\n", + "setting pred name Generator_outputy_pred_3 score to 1.0\n", + "Subset loss backward time: 2.2182435989379883\n", + "Optimizer propose...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Proposing: 0%| | 0/5 [00:00 0.6666666666666666\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "\n", + "Loading Data: 100%|██████████| 8/8 [00:00<00:00, 281.73it/s]\n", + "Evaluating step(3): 1.0 across 8 samples, Max potential: 1.0: 100%|██████████| 8/8 [00:02<00:00, 2.96it/s]\n", + "Proposing: 20%|██ | 1/5 [00:08<00:34, 8.54s/it]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pass full check: 1.0 >= 0.875\n", + "Done with proposals\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 1910.10it/s]\n", + "Evaluating step(30): 0.72 across 25 samples, Max potential: 0.86: 50%|█████ | 25/50 [00:18<00:18, 1.38it/s]\n", + "Training Step: 31: 31%|███ | 4/13 [01:05<03:03, 20.39s/it]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Optimizer revert: 0.6923076923076923 <= 0.86\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 310.31it/s]\n", + "Training: 100%|██████████| 4/4 [00:01<00:00, 3.75it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 454.32it/s]\n", + 
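The "Subset score" / "Fail subset check" / "Pass full check" lines in this log come from a two-stage validation gate: each proposed prompt is scored first on a small subset built from recent error and correct samples, and only if it beats the subset baseline is it evaluated on the full validation split. A minimal sketch of that gate follows; `score_fn`, `subset`, and `val_set` are illustrative names for this sketch, not AdalFlow's actual internals.

```python
from typing import Callable, Sequence

# Minimal sketch of the two-stage gate seen in the logs above.
# `score_fn`, `subset`, and `val_set` are hypothetical stand-ins.
def passes_gates(
    score_fn: Callable[[str, Sequence], float],
    candidate_prompt: str,
    subset: Sequence,          # a few recent error + correct samples
    val_set: Sequence,         # the full validation split
    subset_baseline: float,    # e.g. "Subset score: 0.6666..."
    val_baseline: float,       # e.g. the "Max potential" score
) -> bool:
    # Cheap gate first: mirrors "Fail subset check, try next proposal: 0.66 <= 0.66"
    if score_fn(candidate_prompt, subset) <= subset_baseline:
        return False
    # Expensive gate second: mirrors "Pass full check: 1.0 >= 0.875"
    return score_fn(candidate_prompt, val_set) >= val_baseline
```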
"Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 12336.19it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Moving batch correct size: 11\n", + "Moving batch error size: 1\n", + "Moving batch acc: 0.9166666666666666\n", + "Moving batch correct size: 11\n", + "Moving batch error size: 1\n", + "Subset Error size: 1\n", + "Subset Correct size: 2\n", + "Subset score: 0.6666666666666666\n", + "Subset batch acc: 0.6666666666666666\n", + "Subset loss backward...\n", + "setting pred name Generator_outputy_pred_0 score to 1.0\n", + "Subset loss backward time: 2.028568983078003\n", + "Optimizer propose...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Proposing: 0%| | 0/5 [00:00 0.6666666666666666\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "\n", + "Loading Data: 100%|██████████| 12/12 [00:00<00:00, 724.90it/s]\n", + "Evaluating step(4): 1.0 across 12 samples, Max potential: 1.0: 100%|██████████| 12/12 [00:03<00:00, 3.66it/s]\n", + "Proposing: 0%| | 0/5 [00:05= 0.9166666666666666\n", + "Done with proposals\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 2233.56it/s]\n", + "Evaluating step(31): 0.8511 across 47 samples, Max potential: 0.86: 94%|█████████▍| 47/50 [00:16<00:01, 2.81it/s]\n", + "Training Step: 32: 38%|███▊ | 5/13 [01:31<02:58, 22.30s/it]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Optimizer revert: 0.8333333333333334 <= 0.86\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 269.31it/s]\n", + "Training: 100%|██████████| 4/4 [00:01<00:00, 3.20it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 606.49it/s]\n", + "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 1212.58it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Moving batch correct size: 15\n", + "Moving batch error size: 1\n", + "Moving batch acc: 0.9375\n", + "Moving batch correct size: 15\n", + "Moving batch error size: 1\n", + "Subset Error size: 1\n", + "Subset Correct size: 2\n", + "Subset score: 0.6666666666666666\n", + "Subset batch acc: 0.6666666666666666\n", + "Subset loss backward...\n", + "setting pred name Generator_outputy_pred_3 score to 1.0\n", + "setting pred name Generator_outputy_pred_1 score to 1.0\n", + "Subset loss backward time: 3.2150633335113525\n", + "Optimizer propose...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Proposing: 0%| | 0/5 [00:00\n", + "500 Internal Server Error\n", + "\n", + "

500 Internal Server Error\n", + "nginx
\n", + "\n", + ")\n", + "Evaluating step(5): 0.6667 across 3 samples, Max potential: 0.6667: 100%|██████████| 3/3 [00:50<00:00, 16.89s/it]\n", + "\n", + "Proposing: 20%|██ | 1/5 [00:52<03:28, 52.11s/it]\u001b[A" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Fail subset check, try next proposal: 0.6666666666666666 <= 0.6666666666666666\n", + "New prompts: [PromptData(id='a530c025-f25c-4423-b146-215ff73586f4', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. Pay special attention to quantities mentioned explicitly, including multiples. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='0b4dc918-1afb-4f03-9193-90ec51a9abab', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "\n", + "Loading Data: 100%|██████████| 3/3 [00:00<00:00, 645.05it/s]\n", + "Evaluating step(5): 0.6667 across 3 samples, Max potential: 0.6667: 100%|██████████| 3/3 [00:00<00:00, 298.94it/s]\n", + "\n", + "Proposing: 40%|████ | 2/5 [00:53<01:07, 22.46s/it]\u001b[A" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Fail subset check, try next proposal: 0.6666666666666666 <= 0.6666666666666666\n", + "New prompts: [PromptData(id='a530c025-f25c-4423-b146-215ff73586f4', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. Pay special attention to quantities mentioned explicitly, including multiples. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='0b4dc918-1afb-4f03-9193-90ec51a9abab', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "\n", + "Loading Data: 100%|██████████| 3/3 [00:00<00:00, 751.40it/s]\n", + "Evaluating step(5): 0.6667 across 3 samples, Max potential: 0.6667: 100%|██████████| 3/3 [00:00<00:00, 360.88it/s]\n", + "\n", + "Proposing: 60%|██████ | 3/5 [00:54<00:25, 12.66s/it]\u001b[A" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Fail subset check, try next proposal: 0.6666666666666666 <= 0.6666666666666666\n", + "New prompts: [PromptData(id='a530c025-f25c-4423-b146-215ff73586f4', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. Pay special attention to quantities mentioned explicitly, including multiples. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='0b4dc918-1afb-4f03-9193-90ec51a9abab', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "\n", + "Loading Data: 100%|██████████| 3/3 [00:00<00:00, 332.13it/s]\n", + "Evaluating step(5): 0.6667 across 3 samples, Max potential: 0.6667: 100%|██████████| 3/3 [00:00<00:00, 276.08it/s]\n", + "\n", + "Proposing: 80%|████████ | 4/5 [00:55<00:08, 8.12s/it]\u001b[A" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Fail subset check, try next proposal: 0.6666666666666666 <= 0.6666666666666666\n", + "New prompts: [PromptData(id='a530c025-f25c-4423-b146-215ff73586f4', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. Pay special attention to quantities mentioned explicitly, including multiples. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='0b4dc918-1afb-4f03-9193-90ec51a9abab', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "\n", + "Loading Data: 100%|██████████| 3/3 [00:00<00:00, 440.13it/s]\n", + "Evaluating step(5): 0.6667 across 3 samples, Max potential: 0.6667: 100%|██████████| 3/3 [00:00<00:00, 235.96it/s]\n", + "\n", + "Proposing: 100%|██████████| 5/5 [00:57<00:00, 11.41s/it]\n", + "Training Step: 33: 46%|████▌ | 6/13 [02:33<04:07, 35.35s/it]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Fail subset check, try next proposal: 0.6666666666666666 <= 0.6666666666666666\n", + "Done with proposals\n", + "No proposal can improve the subset and full set, go to next step\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 317.05it/s]\n", + "Training: 100%|██████████| 4/4 [00:00<00:00, 676.47it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 543.36it/s]\n", + "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 1518.44it/s]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Moving batch correct size: 18\n", + "Moving batch error size: 2\n", + "Moving batch acc: 0.9\n", + "Moving batch correct size: 18\n", + "Moving batch error size: 2\n", + "Subset Error size: 2\n", + "Subset Correct size: 4\n", + "Subset score: 0.6666666666666666\n", + "Subset batch acc: 0.6666666666666666\n", + "Subset loss backward...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "setting pred name Generator_outputy_pred_3 score to 1.0\n", + "setting pred name Generator_outputy_pred_2 score to 0.0\n", + "setting pred name Generator_outputy_pred_0 score to 1.0\n", + "setting pred name Generator_outputy_pred_1 score to 1.0\n", + "Subset loss backward time: 7.857504606246948\n", + "Optimizer propose...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Proposing: 0%| | 0/5 [00:00 0.6666666666666666\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "\n", + "Loading Data: 100%|██████████| 20/20 [00:00<00:00, 649.93it/s]\n", + 
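Each "Optimizer propose..." followed by either "Optimizer step" or "Optimizer revert" in the log is one pass of a bounded proposal loop: the text optimizer drafts up to five candidate prompts per step (the "Proposing: x/5" bars), keeps the first one that beats the best validation score so far, and reverts otherwise. A rough sketch under that reading, with hypothetical `optimizer`/`evaluate` hooks rather than the library's real API:

```python
# Illustrative proposal loop matching the "Proposing: x/5" progress bars above.
# `optimizer` is assumed to expose propose/step/revert, as the log lines suggest;
# `evaluate` is a hypothetical scoring callback.
def run_proposals(optimizer, evaluate, best_val_score: float, max_proposals: int = 5) -> float:
    """Keep the first proposal that beats the record; otherwise revert."""
    for _ in range(max_proposals):
        optimizer.propose()                 # "Optimizer propose..."
        score = evaluate()
        if score > best_val_score:          # e.g. "Optimizer step: 0.94 > 0.88"
            optimizer.step()                # accept the proposed prompts
            return score
        optimizer.revert()                  # e.g. "Optimizer revert: 0.83 <= 0.86"
    return best_val_score                   # "Done with proposals"
```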
"Evaluating step(8): 0.95 across 20 samples, Max potential: 0.95: 100%|██████████| 20/20 [00:02<00:00, 8.93it/s]\n", + "Proposing: 20%|██ | 1/5 [00:08<00:35, 8.79s/it]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pass full check: 0.95 >= 0.9\n", + "Done with proposals\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 2667.62it/s]\n", + "Evaluating step(35): 0.8511 across 47 samples, Max potential: 0.86: 94%|█████████▍| 47/50 [00:00<00:00, 559.52it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Optimizer revert: 0.8333333333333334 <= 0.86\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Step: 36: 69%|██████▉ | 9/13 [03:21<01:29, 22.39s/it]\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 154.85it/s]\n", + "Training: 100%|██████████| 4/4 [00:01<00:00, 3.33it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 610.06it/s]\n", + "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 1798.78it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Moving batch correct size: 22\n", + "Moving batch error size: 2\n", + "Moving batch acc: 0.95\n", + "Moving batch correct size: 19\n", + "Moving batch error size: 1\n", + "Subset Error size: 1\n", + "Subset Correct size: 2\n", + "Subset score: 0.6666666666666666\n", + "Subset batch acc: 0.6666666666666666\n", + "Subset loss backward...\n", + "setting pred name Generator_outputy_pred_2 score to 1.0\n", + "Subset loss backward time: 2.553833246231079\n", + "Optimizer propose...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Proposing: 0%| | 0/5 [00:00\n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Carefully count each item and verify your total. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + ")\n", + "Teacher generator set: Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Carefully count each item and verify your total. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + "), teacher Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Carefully count each item and verify your total. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + ")\n", + "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", + "Configuring teacher generator for Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + ")\n", + "Teacher generator set: Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + "), teacher Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + ")\n", + "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", + "Configuring teacher generator for Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " You are the feedback engine in an optimization system.\n", + " \n", + " Your role: Provide intelligent and creative feedback for the variable enclosed in tags, based on the objective specified in tags.\n", + " 1. Focus on the downstream OBJECTIVE without proposing new versions of the variable.\n", + " 2. Feedback examples: \"Since language models have the X failure mode...\", \"Adding X can fix this error because...\", \"Removing X can improve the objective function because...\", \"Changing X to Y would fix the mistake...\"\n", + " 3. Consider the variable in the context of its peers if provided.\n", + " Remember:\n", + " Be concise, critical, and direct.\n", + " \n", + " \n", + " {{conversation_sec}}\n", + " \n", + " {{objective_instruction_sec}}\n", + " , prompt_variables: ['objective_instruction_sec', 'conversation_sec']\n", + " )\n", + " (model_client): OpenAIClient()\n", + ")\n", + "Teacher generator set: Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " You are the feedback engine in an optimization system.\n", + " \n", + " Your role: Provide intelligent and creative feedback for the variable enclosed in tags, based on the objective specified in tags.\n", + " 1. Focus on the downstream OBJECTIVE without proposing new versions of the variable.\n", + " 2. Feedback examples: \"Since language models have the X failure mode...\", \"Adding X can fix this error because...\", \"Removing X can improve the objective function because...\", \"Changing X to Y would fix the mistake...\"\n", + " 3. 
Consider the variable in the context of its peers if provided.\n", + " Remember:\n", + " Be concise, critical, and direct.\n", + " \n", + " \n", + " {{conversation_sec}}\n", + " \n", + " {{objective_instruction_sec}}\n", + " , prompt_variables: ['objective_instruction_sec', 'conversation_sec']\n", + " )\n", + " (model_client): OpenAIClient()\n", + "), teacher Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " You are the feedback engine in an optimization system.\n", + " \n", + " Your role: Provide intelligent and creative feedback for the variable enclosed in tags, based on the objective specified in tags.\n", + " 1. Focus on the downstream OBJECTIVE without proposing new versions of the variable.\n", + " 2. Feedback examples: \"Since language models have the X failure mode...\", \"Adding X can fix this error because...\", \"Removing X can improve the objective function because...\", \"Changing X to Y would fix the mistake...\"\n", + " 3. Consider the variable in the context of its peers if provided.\n", + " Remember:\n", + " Be concise, critical, and direct.\n", + " \n", + " \n", + " {{conversation_sec}}\n", + " \n", + " {{objective_instruction_sec}}\n", + " , prompt_variables: ['objective_instruction_sec', 'conversation_sec']\n", + " )\n", + " (model_client): OpenAIClient()\n", + ")\n", + "Teacher generator configured.\n", + "save to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n", + "Starting step: 38\n", + "trainer_results: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Step: 39: 0%| | 0/12 [00:00\n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': None}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + " )\n", + " )\n", + " (loss_fn): EvalFnToTextLoss()\n", + ")\n", + "Trainer(\n", + " (adaltask): ObjectCountAdalComponent(\n", + " eval_fn: compute_single_item, backward_engine: None, backward_engine_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}, teacher_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}, text_optimizer_model_config: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}\n", + " (task): ObjectCountTaskPipeline(\n", + " (llm_counter): Generator(\n", + " model_kwargs={'model': 'gpt-3.5-turbo', 'max_tokens': 2000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': None}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + " )\n", + " )\n", + " (loss_fn): EvalFnToTextLoss()\n", + " )\n", + ")\n", + "raw_shots: 0, bootstrap_shots: 1\n", + "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", + "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", + "Configuring teacher generator for Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + ")\n", + "Teacher generator set: Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + "), teacher Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + ")\n", + "Teacher generator configured.\n", + "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", + "Backward engine configured for all generators.\n", + "Restoring prompts: PromptData(id='44f6083f-4cf7-4a9a-bf10-20d218ee4106', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True)\n", + "save to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Step: 51: 0%| | 0/13 [00:00 0.86\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 100/100 [00:00<00:00, 5848.08it/s]\n", + "Evaluating step(53): 0.9 across 100 samples, Max potential: 0.9: 100%|██████████| 100/100 [00:30<00:00, 3.32it/s]\n", + "Training Step: 54: 23%|██▎ | 3/13 [01:28<05:35, 33.51s/it]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Saving checkpoint to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 297.78it/s]\n", + "Training: 100%|██████████| 4/4 [00:01<00:00, 2.95it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 407.40it/s]\n", + "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 8952.62it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loss backward...\n", + "setting pred name Generator_outputy_pred_1 score to 1.0\n", + "setting pred name Generator_outputy_pred_0 score to 1.0\n", + "setting pred name Generator_outputy_pred_2 score to 1.0\n", + "setting pred name Generator_outputy_pred_3 score to 1.0\n", + "Optimizer propose...\n", + "New prompts: [PromptData(id='327b63f0-b532-435a-85d7-6137d4e52c4c', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. List each item individually and ensure accuracy. Show your calculations step by step. 
The last line of your response should be: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='73a3953b-6351-44d8-a36f-7521db346cca', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 1274.72it/s]\n", + "Evaluating step(54): 0.94 across 50 samples, Max potential: 0.94: 100%|██████████| 50/50 [00:16<00:00, 3.06it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Optimizer step: 0.94 > 0.88\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 100/100 [00:00<00:00, 6831.78it/s]\n", + "Evaluating step(54): 0.91 across 100 samples, Max potential: 0.91: 100%|██████████| 100/100 [00:30<00:00, 3.33it/s]\n", + "Training Step: 55: 31%|███ | 4/13 [02:21<06:10, 41.21s/it]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Saving checkpoint to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 152.84it/s]\n", + "Training: 100%|██████████| 4/4 [00:03<00:00, 1.28it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 688.86it/s]\n", + "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 1318.45it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loss backward...\n", + "setting pred name Generator_outputy_pred_0 score to 1.0\n", + "setting pred name Generator_outputy_pred_1 score to 0.0\n", + "setting pred name Generator_outputy_pred_2 score to 1.0\n", + "setting pred name Generator_outputy_pred_3 score to 1.0\n", + "Optimizer propose...\n", + "New prompts: [PromptData(id='327b63f0-b532-435a-85d7-6137d4e52c4c', name='llm_counter.system_prompt', data='You will answer a reasoning question. Carefully count each item and verify your total. List each item individually, ensuring each is counted as \"1\" regardless of quantity mentioned. Show your calculations step by step. 
The last line of your response should be: \\'Answer: $VALUE\\' where VALUE is a numerical value.', requires_opt=True), PromptData(id='73a3953b-6351-44d8-a36f-7521db346cca', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 2011.16it/s]\n", + "Evaluating step(55): 0.8696 across 23 samples, Max potential: 0.94: 46%|████▌ | 23/50 [00:15<00:17, 1.52it/s]\n", + "Training Step: 56: 38%|███▊ | 5/13 [02:46<04:43, 35.43s/it]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Optimizer revert: 0.8333333333333334 <= 0.94\n", + "Saving checkpoint to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 93.66it/s]\n", + "Training: 100%|██████████| 4/4 [00:02<00:00, 1.75it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 646.55it/s]\n", + "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 2217.45it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loss backward...\n", + "setting pred name Generator_outputy_pred_3 score to 1.0\n", + "setting pred name Generator_outputy_pred_1 score to 1.0\n", + "setting pred name Generator_outputy_pred_0 score to 1.0\n", + "setting pred name Generator_outputy_pred_2 score to 1.0\n", + "Optimizer propose...\n", + "New prompts: [PromptData(id='327b63f0-b532-435a-85d7-6137d4e52c4c', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. List each item individually and ensure accuracy. Show your calculations step by step. The last line of your response should be: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='73a3953b-6351-44d8-a36f-7521db346cca', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 4572.35it/s]\n", + "Evaluating step(56): 0.94 across 50 samples, Max potential: 0.94: 100%|██████████| 50/50 [00:00<00:00, 390.77it/s]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Optimizer revert: 0.94 <= 0.94\n", + "Saving checkpoint to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Training Step: 57: 46%|████▌ | 6/13 [02:54<03:02, 26.03s/it]\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 145.48it/s]\n", + "Training: 100%|██████████| 4/4 [00:02<00:00, 1.52it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 375.76it/s]\n", + "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 1437.76it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loss backward...\n", + "setting pred name Generator_outputy_pred_2 score to 1.0\n", + "setting pred name Generator_outputy_pred_0 score to 1.0\n", + "setting pred name Generator_outputy_pred_1 score to 0.0\n", + "setting pred name Generator_outputy_pred_3 score to 1.0\n", + "Optimizer propose...\n", + "New prompts: [PromptData(id='327b63f0-b532-435a-85d7-6137d4e52c4c', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. 
Carefully count each item and verify your total. List each item individually and ensure accuracy. Show your calculations step by step. Double-check for any grouped items and count them correctly. The last line of your response should be: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='73a3953b-6351-44d8-a36f-7521db346cca', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 1614.47it/s]\n", + "Evaluating step(57): 0.7857 across 14 samples, Max potential: 0.94: 28%|██▊ | 14/50 [00:19<00:50, 1.41s/it]\n", + "Training Step: 58: 54%|█████▍ | 7/13 [03:23<02:42, 27.04s/it]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Optimizer revert: 0.7333333333333333 <= 0.94\n", + "Saving checkpoint to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 137.96it/s]\n", + "Training: 100%|██████████| 4/4 [00:01<00:00, 2.94it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 806.79it/s]\n", + "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 11522.81it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loss backward...\n", + "setting pred name Generator_outputy_pred_2 score to 1.0\n", + "setting pred name Generator_outputy_pred_1 score to 1.0\n", + "setting pred name Generator_outputy_pred_3 score to 1.0\n", + "setting pred name Generator_outputy_pred_0 score to 1.0\n", + "Optimizer propose...\n", + "New prompts: [PromptData(id='327b63f0-b532-435a-85d7-6137d4e52c4c', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. List each item individually and ensure accuracy. Show your calculations step by step. 
The last line of your response should be formatted as: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='73a3953b-6351-44d8-a36f-7521db346cca', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 3560.17it/s]\n", + "Evaluating step(58): 0.88 across 25 samples, Max potential: 0.94: 50%|█████ | 25/50 [00:17<00:17, 1.45it/s]\n", + "Training Step: 59: 62%|██████▏ | 8/13 [03:47<02:10, 26.06s/it]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Optimizer revert: 0.8461538461538461 <= 0.94\n", + "Saving checkpoint to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 93.90it/s]\n", + "Training: 100%|██████████| 4/4 [00:01<00:00, 2.70it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 552.01it/s]\n", + "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 5648.89it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loss backward...\n", + "setting pred name Generator_outputy_pred_1 score to 1.0\n", + "setting pred name Generator_outputy_pred_3 score to 1.0\n", + "setting pred name Generator_outputy_pred_0 score to 1.0\n", + "setting pred name Generator_outputy_pred_2 score to 0.0\n", + "Optimizer propose...\n", + "New prompts: [PromptData(id='327b63f0-b532-435a-85d7-6137d4e52c4c', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. List each item individually and ensure accuracy. Show your calculations step by step. Double-check your final count. 
The last line of your response should be: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='73a3953b-6351-44d8-a36f-7521db346cca', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 1770.11it/s]\n", + "Evaluating step(59): 0.9286 across 42 samples, Max potential: 0.94: 84%|████████▍ | 42/50 [00:16<00:03, 2.49it/s]\n", + "Training Step: 60: 69%|██████▉ | 9/13 [04:13<01:43, 26.00s/it]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Optimizer revert: 0.9069767441860465 <= 0.94\n", + "Saving checkpoint to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 314.86it/s]\n", + "Training: 100%|██████████| 4/4 [00:01<00:00, 3.10it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 722.53it/s]\n", + "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 7940.00it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loss backward...\n", + "setting pred name Generator_outputy_pred_0 score to 1.0\n", + "setting pred name Generator_outputy_pred_1 score to 1.0\n", + "setting pred name Generator_outputy_pred_3 score to 1.0\n", + "setting pred name Generator_outputy_pred_2 score to 1.0\n", + "Optimizer propose...\n", + "New prompts: [PromptData(id='327b63f0-b532-435a-85d7-6137d4e52c4c', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. List each item individually and ensure accuracy. Show your calculations step by step. Double-check your final count for precision. 
The last line of your response should be: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='73a3953b-6351-44d8-a36f-7521db346cca', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 7188.43it/s]\n", + "Evaluating step(60): 0.8966 across 29 samples, Max potential: 0.94: 58%|█████▊ | 29/50 [00:15<00:11, 1.84it/s]\n", + "Training Step: 61: 77%|███████▋ | 10/13 [04:35<01:14, 24.87s/it]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Optimizer revert: 0.8666666666666667 <= 0.94\n", + "Saving checkpoint to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 95.68it/s]\n", + "Training: 100%|██████████| 4/4 [00:01<00:00, 2.74it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 587.05it/s]\n", + "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 12520.31it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loss backward...\n", + "setting pred name Generator_outputy_pred_1 score to 1.0\n", + "setting pred name Generator_outputy_pred_0 score to 1.0\n", + "setting pred name Generator_outputy_pred_3 score to 1.0\n", + "setting pred name Generator_outputy_pred_2 score to 1.0\n", + "Optimizer propose...\n", + "New prompts: [PromptData(id='327b63f0-b532-435a-85d7-6137d4e52c4c', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. List each item individually and ensure accuracy. Show your calculations step by step. Double-check your final count. 
The last line of your response should be: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='73a3953b-6351-44d8-a36f-7521db346cca', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 3036.62it/s]\n", + "Evaluating step(61): 0.9286 across 42 samples, Max potential: 0.94: 84%|████████▍ | 42/50 [00:00<00:00, 327.89it/s]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Optimizer revert: 0.9069767441860465 <= 0.94\n", + "Saving checkpoint to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Training Step: 62: 85%|████████▍ | 11/13 [04:44<00:40, 20.14s/it]\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 136.40it/s]\n", + "Training: 100%|██████████| 4/4 [00:01<00:00, 3.17it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 417.11it/s]\n", + "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 14339.50it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loss backward...\n", + "setting pred name Generator_outputy_pred_2 score to 1.0\n", + "setting pred name Generator_outputy_pred_0 score to 1.0\n", + "setting pred name Generator_outputy_pred_3 score to 1.0\n", + "setting pred name Generator_outputy_pred_1 score to 1.0\n", + "Optimizer propose...\n", + "New prompts: [PromptData(id='327b63f0-b532-435a-85d7-6137d4e52c4c', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. List each item individually and ensure accuracy. Show your calculations step by step. Double-check your final count. The last line of your response should be: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='73a3953b-6351-44d8-a36f-7521db346cca', name='llm_counter.few_shot_demos', data=None, requires_opt=True)]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 5035.06it/s]\n", + "Evaluating step(62): 0.9286 across 42 samples, Max potential: 0.94: 84%|████████▍ | 42/50 [00:00<00:00, 327.19it/s]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Optimizer revert: 0.9069767441860465 <= 0.94\n", + "Saving checkpoint to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Training Step: 62: 92%|█████████▏| 12/13 [04:51<00:24, 24.28s/it]\n", + "Epoch: 0%| | 0/1 [04:51\n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Carefully count each item and verify your total. List each item individually and ensure accuracy. Show your calculations step by step. 
The last line of your response should be: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + ")\n", + "Teacher generator set: Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Carefully count each item and verify your total. List each item individually and ensure accuracy. Show your calculations step by step. The last line of your response should be: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + "), teacher Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Carefully count each item and verify your total. List each item individually and ensure accuracy. Show your calculations step by step. The last line of your response should be: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + ")\n", + "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", + "Configuring teacher generator for Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. 
The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + ")\n", + "Teacher generator set: Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + "), teacher Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {# Few shot demos #}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': \"You will answer a reasoning question. Think step by step. The last line of your response should be of the following format: 'Answer: $VALUE' where VALUE is a numerical value.\", 'few_shot_demos': 'None'}, prompt_variables: ['input_str', 'few_shot_demos', 'system_prompt']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): ParseIntegerAnswerComponent(fun_name=parse_integer_answer)\n", + ")\n", + "cache_path: /root/.adalflow/cache_OpenAIClient_gpt-4o.db\n", + "Configuring teacher generator for Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " You are the feedback engine in an optimization system.\n", + " \n", + " Your role: Provide intelligent and creative feedback for the variable enclosed in tags, based on the objective specified in tags.\n", + " 1. Focus on the downstream OBJECTIVE without proposing new versions of the variable.\n", + " 2. Feedback examples: \"Since language models have the X failure mode...\", \"Adding X can fix this error because...\", \"Removing X can improve the objective function because...\", \"Changing X to Y would fix the mistake...\"\n", + " 3. 
Consider the variable in the context of its peers if provided.\n", + " Remember:\n", + " Be concise, critical, and direct.\n", + " \n", + " \n", + " {{conversation_sec}}\n", + " \n", + " {{objective_instruction_sec}}\n", + " , prompt_variables: ['objective_instruction_sec', 'conversation_sec']\n", + " )\n", + " (model_client): OpenAIClient()\n", + ")\n", + "Teacher generator set: Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " You are the feedback engine in an optimization system.\n", + " \n", + " Your role: Provide intelligent and creative feedback for the variable enclosed in tags, based on the objective specified in tags.\n", + " 1. Focus on the downstream OBJECTIVE without proposing new versions of the variable.\n", + " 2. Feedback examples: \"Since language models have the X failure mode...\", \"Adding X can fix this error because...\", \"Removing X can improve the objective function because...\", \"Changing X to Y would fix the mistake...\"\n", + " 3. Consider the variable in the context of its peers if provided.\n", + " Remember:\n", + " Be concise, critical, and direct.\n", + " \n", + " \n", + " {{conversation_sec}}\n", + " \n", + " {{objective_instruction_sec}}\n", + " , prompt_variables: ['objective_instruction_sec', 'conversation_sec']\n", + " )\n", + " (model_client): OpenAIClient()\n", + "), teacher Generator(\n", + " model_kwargs={'model': 'gpt-4o', 'max_tokens': 4000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}, \n", + " (prompt): Prompt(\n", + " template: \n", + " You are the feedback engine in an optimization system.\n", + " \n", + " Your role: Provide intelligent and creative feedback for the variable enclosed in tags, based on the objective specified in tags.\n", + " 1. Focus on the downstream OBJECTIVE without proposing new versions of the variable.\n", + " 2. Feedback examples: \"Since language models have the X failure mode...\", \"Adding X can fix this error because...\", \"Removing X can improve the objective function because...\", \"Changing X to Y would fix the mistake...\"\n", + " 3. 
Consider the variable in the context of its peers if provided.\n", + " Remember:\n", + " Be concise, critical, and direct.\n", + " \n", + " \n", + " {{conversation_sec}}\n", + " \n", + " {{objective_instruction_sec}}\n", + " , prompt_variables: ['objective_instruction_sec', 'conversation_sec']\n", + " )\n", + " (model_client): OpenAIClient()\n", + ")\n", + "Teacher generator configured.\n", + "save to /content/adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_4e8a1_run_1.json\n", + "Starting step: 62\n", + "trainer_results: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Step: 63: 0%| | 0/12 [00:00 0.94\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 100/100 [00:00<00:00, 3294.35it/s]\n", + "Evaluating step(65): 0.95 across 100 samples, Max potential: 0.95: 100%|██████████| 100/100 [00:39<00:00, 2.51it/s]\n", + "Training Step: 66: 42%|████▏ | 5/12 [01:50<02:42, 23.20s/it]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Training Step: 66\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 186.04it/s]\n", + "Training: 25%|██▌ | 1/4 [00:00<00:02, 1.01it/s]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Trace with id fe9b883c-4f47-44f7-a388-b03a2fb10413 already exists. Updating the trace.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\rTraining: 50%|█████ | 2/4 [00:01<00:01, 1.30it/s]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Trace with id 12a6ff3d-f54d-4d89-b5f0-1aec30e96398 already exists. Updating the trace.\n", + "Trace with id 840d9ed5-8222-45a9-a406-7445feae9733 already exists. Updating the trace.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training: 100%|██████████| 4/4 [00:02<00:00, 1.46it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Trace with id 3a9a47c8-a210-43a4-8d24-b9159babb6e4 already exists. Updating the trace.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 636.54it/s]\n", + "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 9420.11it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 111.34it/s]\n", + "Training: 100%|██████████| 4/4 [00:01<00:00, 2.50it/s]\n", + "\n", + "Loading Data: 100%|██████████| 4/4 [00:00<00:00, 321.28it/s]\n", + "Calculating Loss: 100%|██████████| 4/4 [00:00<00:00, 731.61it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "sampled_augmented_demos: ['99607986-e107-46b8-b86b-177b295983c4']\n", + "New prompts: [PromptData(id='327b63f0-b532-435a-85d7-6137d4e52c4c', name='llm_counter.system_prompt', data=\"You will answer a reasoning question. Carefully count each item and verify your total. List each item individually and ensure accuracy. Show your calculations step by step. 
The last line of your response should be: 'Answer: $VALUE' where VALUE is a numerical value.\", requires_opt=True), PromptData(id='73a3953b-6351-44d8-a36f-7521db346cca', name='llm_counter.few_shot_demos', data=\"input_str: I have a yam, a cauliflower, a bed, two cabbages, a garlic, an oven, a\\n carrot, a head of broccoli, a potato, a stalk of celery, a lettuce head, and a toaster.\\n How many vegetables do I have?\\nExample: 'Let''s list and count each vegetable individually:\\n\\n\\n 1. Yam\\n\\n 2. Cauliflower\\n\\n 3. Cabbage (1)\\n\\n 4. Cabbage (2)\\n\\n 5. Garlic\\n\\n 6. Carrot\\n\\n 7. Broccoli\\n\\n 8. Potato\\n\\n 9. Celery\\n\\n 10. Lettuce\\n\\n\\n Now, let''s verify the count:\\n\\n\\n 1. Yam\\n\\n 2. Cauliflower\\n\\n 3. Cabbage (1)\\n\\n 4. Cabbage (2)\\n\\n 5. Garlic\\n\\n 6. Carrot\\n\\n 7. Broccoli\\n\\n 8. Potato\\n\\n 9. Celery\\n\\n 10. Lettuce\\n\\n\\n Total number of vegetables: 10\\n\\n\\n Answer: 10'\", requires_opt=True)]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 100%|██████████| 50/50 [00:00<00:00, 1120.89it/s]\n", + "Evaluating step(66): 0.96 across 50 samples, Max potential: 0.96: 100%|██████████| 50/50 [00:00<00:00, 212.00it/s]\n", + "Training Step: 67: 58%|█████▊ | 7/12 [01:55<01:32, 18.51s/it]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Fail validation: 0.96 <= 0.96, revert\n", + "Training Step: 67\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Loading Data: 0%| | 0/4 [00:00\n", - " \"Open\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "gHK6HFngl6iP" - }, - "source": [ - "# 🤗 Welcome to AdalFlow!\n", - "## The library to build & auto-optimize any LLM task pipelines\n", - "\n", - "Thanks for trying us out, we're here to provide you with the best LLM application development experience you can dream of 😊 any questions or concerns you may have, [come talk to us on discord,](https://discord.gg/ezzszrRZvT) we're always here to help! ⭐ Star us on Github ⭐\n", - "\n", - "\n", - "# Quick Links\n", - "\n", - "Github repo: https://github.com/SylphAI-Inc/AdalFlow\n", - "\n", - "Full Tutorials: https://adalflow.sylph.ai/index.html#.\n", - "\n", - "Deep dive on each API: check out the [developer notes](https://adalflow.sylph.ai/tutorials/index.html).\n", - "\n", - "Common use cases along with the auto-optimization: check out [Use cases](https://adalflow.sylph.ai/use_cases/index.html).\n", - "\n", - "# Author\n", - "\n", - "This notebook was created by community contributor [Ajith](https://github.com/ajithvcoder).\n", - "\n", - "# Outline\n", - "\n", - "This is a quick introduction of what AdalFlow is capable of. We will cover:\n", - "\n", - "* How to use `DataClass` with `DataClassParser`.\n", - "* How to do nested dataclass, we will test both one and two levels of nesting.\n", - "\n", - "**Next: Try our [auto-optimization](https://colab.research.google.com/drive/1n3mHUWekTEYHiBdYBTw43TKlPN41A9za?usp=sharing)**\n", - "\n", - "\n", - "# Installation\n", - "\n", - "1. Use `pip` to install the `adalflow` Python package. We will need `openai` and `groq`from the extra packages.\n", - "\n", - " ```bash\n", - " pip install adalflow[openai,groq]\n", - " ```\n", - "2. 
Setup `openai` and `groq` API key in the environment variables" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "nqe-vxB1BCux" - }, - "source": [ - "### Install adalflow" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "id": "ZaaevxNH9JMQ" - }, - "outputs": [], - "source": [ - "# Install adalflow with necessary dependencies\n", - "from IPython.display import clear_output\n", - "\n", - "!pip install -U adalflow[openai,groq]\n", - "\n", - "clear_output()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "NGE70aZ8BLuf" - }, - "source": [ - "### Set Environment Variables\n", - "\n", - "Note: Enter your api keys in below cell" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "j2xmGr_99YDq", - "outputId": "c3d1e0b7-9072-412e-fed1-4578404357be" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Overwriting .env\n" - ] - } - ], - "source": [ - "%%writefile .env\n", - "\n", - "OPENAI_API_KEY=\"PASTE-OPENAI_API_KEY_HERE\"\n", - "GROQ_API_KEY=\"PASTE-GROQ_API_KEY-HERE\"" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "API keys have been set.\n" - ] - } - ], - "source": [ - "# or more securely\n", - "\n", - "import os\n", - "\n", - "from getpass import getpass\n", - "\n", - "# Prompt user to enter their API keys securely\n", - "groq_api_key = getpass(\"Please enter your GROQ API key: \")\n", - "openai_api_key = getpass(\"Please enter your OpenAI API key: \")\n", - "\n", - "\n", - "# Set environment variables\n", - "os.environ['GROQ_API_KEY'] = groq_api_key\n", - "os.environ['OPENAI_API_KEY'] = openai_api_key\n", - "\n", - "print(\"API keys have been set.\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ZxBkm77uBZpl" - }, - "source": [ - "### Import necessary libraries" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "id": "wOAiKg899Z2u" - }, - "outputs": [], - "source": [ - "# Import required libraries\n", - "from dataclasses import dataclass, field\n", - "from typing import List, Dict\n", - "import adalflow as adal\n", - "from adalflow.components.model_client import GroqAPIClient\n", - "from adalflow.utils import setup_env" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'0.2.4'" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "adal.__version__" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "bTzgyp6S9bnH" - }, - "outputs": [], - "source": [ - "# Load environment variables - Make sure to have OPENAI_API_KEY in .env file and .env is present in current folder\n", - "setup_env(\".env\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "MBW5viOG9hM8" - }, - "source": [ - "### Basic Vannila Example" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "id": "YA4pAIek9ewc" - }, - "outputs": [], - "source": [ - "# Define the output structure using dataclass\n", - "@dataclass\n", - "class BasicQAOutput(adal.DataClass):\n", - " explanation: str = field(\n", - " metadata={\"desc\": \"A brief explanation of the concept in one sentence.\"}\n", - " )\n", - " example: str = field(\n", - " metadata={\"desc\": \"An example of the concept in a 
sentence.\"}\n", - " )\n", - " # Control output fields order\n", - " __output_fields__ = [\"explanation\", \"example\"]\n", - "\n", - "# Define the template using jinja2 syntax\n", - "qa_template = r\"\"\"\n", - "You are a helpful assistant.\n", - "\n", - "{{output_format_str}}\n", - "\n", - "\n", - " {{input_str}} \"\"\"" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "id": "x4__jnbP9luN" - }, - "outputs": [], - "source": [ - "# Define the QA component\n", - "class QA(adal.Component):\n", - " def __init__(self, model_client: adal.ModelClient, model_kwargs: Dict):\n", - " super().__init__()\n", - "\n", - " # Initialize the parser with the output dataclass\n", - " parser = adal.DataClassParser(data_class=BasicQAOutput, return_data_class=True)\n", - "\n", - " # Set up the generator with model, template, and parser\n", - " self.generator = adal.Generator(\n", - " model_client=model_client,\n", - " model_kwargs=model_kwargs,\n", - " template=qa_template,\n", - " prompt_kwargs={\"output_format_str\": parser.get_output_format_str()},\n", - " output_processors=parser,\n", - " )\n", - "\n", - " def call(self, query: str):\n", - " \"\"\"Synchronous call to generate response\"\"\"\n", - " return self.generator.call({\"input_str\": query})\n", - "\n", - " async def acall(self, query: str):\n", - " \"\"\"Asynchronous call to generate response\"\"\"\n", - " return await self.generator.acall({\"input_str\": query})\n" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": { - "id": "TVi3rGvs9nte" - }, - "outputs": [], - "source": [ - "# Example usage\n", - "def run_basic_example():\n", - " # Instantiate the QA class with Groq model\n", - " qa = QA(\n", - " model_client=GroqAPIClient(),\n", - " model_kwargs={\"model\": \"llama3-8b-8192\"},\n", - " )\n", - "\n", - " # Print the QA instance details\n", - " print(qa)\n", - "\n", - " # Test the QA system\n", - " response = qa(\"What is LLM?\")\n", - " print(\"\\nResponse:\")\n", - " print(response)\n", - " print(f\"BasicQAOutput: {response.data}\")\n", - " print(f\"Explanation: {response.data.explanation}\")\n", - " print(f\"Example: {response.data.example}\")" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "QA(\n", - " (generator): Generator(\n", - " model_kwargs={'model': 'llama3-8b-8192'}, trainable_prompt_kwargs=[]\n", - " (prompt): Prompt(\n", - " template: \n", - " You are a helpful assistant.\n", - " \n", - " {{output_format_str}}\n", - " \n", - " \n", - " {{input_str}} , prompt_kwargs: {'output_format_str': 'Your output should be formatted as a standard JSON instance with the following schema:\\n```\\n{\\n \"explanation\": \"A brief explanation of the concept in one sentence. (str) (required)\",\\n \"example\": \"An example of the concept in a sentence. (str) (required)\"\\n}\\n```\\n-Make sure to always enclose the JSON output in triple backticks (```). 
Please do not add anything other than valid JSON output!\\n-Use double quotes for the keys and string values.\\n-DO NOT mistaken the \"properties\" and \"type\" in the schema as the actual fields in the JSON output.\\n-Follow the JSON formatting conventions.'}, prompt_variables: ['input_str', 'output_format_str']\n", - " )\n", - " (model_client): GroqAPIClient()\n", - " (output_processors): DataClassParser(\n", - " data_class=BasicQAOutput, format_type=json, return_data_class=True, input_fields=[], output_fields=['explanation', 'example']\n", - " (_output_processor): JsonParser()\n", - " (output_format_prompt): Prompt(\n", - " template: Your output should be formatted as a standard JSON instance with the following schema:\n", - " ```\n", - " {{schema}}\n", - " ```\n", - " -Make sure to always enclose the JSON output in triple backticks (```). Please do not add anything other than valid JSON output!\n", - " -Use double quotes for the keys and string values.\n", - " -DO NOT mistaken the \"properties\" and \"type\" in the schema as the actual fields in the JSON output.\n", - " -Follow the JSON formatting conventions., prompt_variables: ['schema']\n", - " )\n", - " )\n", - " )\n", - ")\n", - "\n", - "Response:\n", - "GeneratorOutput(id=None, data=BasicQAOutput(explanation='Large Language Model (LLM) is a type of artificial intelligence designed to process and generate human-like language', example='The new LLM-powered chatbot was able to understand and respond to complex user queries with high accuracy'), error=None, usage=CompletionUsage(completion_tokens=60, prompt_tokens=174, total_tokens=234), raw_response='```\\n{\\n \"explanation\": \"Large Language Model (LLM) is a type of artificial intelligence designed to process and generate human-like language\",\\n \"example\": \"The new LLM-powered chatbot was able to understand and respond to complex user queries with high accuracy\"\\n}\\n```', metadata=None)\n", - "BasicQAOutput: BasicQAOutput(explanation='Large Language Model (LLM) is a type of artificial intelligence designed to process and generate human-like language', example='The new LLM-powered chatbot was able to understand and respond to complex user queries with high accuracy')\n", - "Explanation: Large Language Model (LLM) is a type of artificial intelligence designed to process and generate human-like language\n", - "Example: The new LLM-powered chatbot was able to understand and respond to complex user queries with high accuracy\n" - ] - } - ], - "source": [ - "run_basic_example()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "1n7edLQ19ql8" - }, - "source": [ - "### Example 1 - Movie analysis data class" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": { - "id": "5Arp4-Dq9u49" - }, - "outputs": [], - "source": [ - "# 1. 
Basic DataClass with different field types\n", - "@dataclass\n", - "class MovieReview(adal.DataClass):\n", - " title: str = field(\n", - " metadata={\"desc\": \"The title of the movie\"}\n", - " )\n", - " rating: float = field(\n", - " metadata={\n", - " \"desc\": \"Rating from 1.0 to 10.0\",\n", - " \"min\": 1.0,\n", - " \"max\": 10.0\n", - " }\n", - " )\n", - " pros: List[str] = field(\n", - " default_factory=list,\n", - " metadata={\"desc\": \"List of positive points about the movie\"}\n", - " )\n", - " cons: List[str] = field(\n", - " default_factory=list,\n", - " metadata={\"desc\": \"List of negative points about the movie\"}\n", - " )\n", - "\n", - " __output_fields__ = [\"title\", \"rating\", \"pros\", \"cons\"]\n" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": { - "id": "VLbRUzXg9yP0" - }, - "outputs": [], - "source": [ - "\n", - "@dataclass\n", - "class Actor(adal.DataClass):\n", - " name: str = field(metadata={\"desc\": \"Actor's full name\"})\n", - " role: str = field(metadata={\"desc\": \"Character name in the movie\"})" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": { - "id": "7MUcu0tk91l4" - }, - "outputs": [], - "source": [ - "# 2. Nested DataClass example\n", - "\n", - "# Have both MovieReview and Actor nested in DetailedMovieReview\n", - "\n", - "@dataclass\n", - "class DetailedMovieReview(adal.DataClass):\n", - " basic_review: MovieReview\n", - " cast: List[Actor] = field(\n", - " default_factory=list,\n", - " metadata={\"desc\": \"List of main actors in the movie\"}\n", - " )\n", - " genre: List[str] = field(\n", - " default_factory=list,\n", - " metadata={\"desc\": \"List of genres for the movie\"}\n", - " )\n", - " recommend: bool = field(\n", - " default_factory=str,\n", - " metadata={\"desc\": \"Whether you would recommend this movie\"}\n", - " )\n", - "\n", - " __output_fields__ = [\"basic_review\", \"cast\", \"genre\", \"recommend\"]" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [], - "source": [ - "# Example template for movie review\n", - "movie_review_template = r\"\"\"\n", - "You are a professional movie critic. 
Analyze the given movie and provide a detailed review.\n", - "\n", - "{{output_format_str}}\n", - "\n", - "\n", - " Review this movie: {{movie_title}} \"\"\"" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [], - "source": [ - "# Create the MovieReviewer component with MovieAnalysis data class\n", - "class MovieReviewer(adal.Component):\n", - " def __init__(self, model_client: adal.ModelClient, model_kwargs: Dict, data_class: adal.DataClass):\n", - " super().__init__()\n", - " self.additional_structure_prompt = \"Dont use 'type' and 'properties' in output directly give as dict\"\n", - " parser = adal.DataClassParser(\n", - " data_class=data_class,\n", - " return_data_class=True\n", - " )\n", - " self.generator = adal.Generator(\n", - " model_client=model_client,\n", - " model_kwargs=model_kwargs,\n", - " template=movie_review_template,\n", - " prompt_kwargs={\"output_format_str\": parser.get_output_format_str() + self.additional_structure_prompt},\n", - " output_processors=parser,\n", - " )\n", - "\n", - " def call(self, movie_title: str):\n", - " return self.generator.call({\"movie_title\": movie_title})" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "DetailedMovieReview: DetailedMovieReview(basic_review=MovieReview(title='The Matrix', rating=8.5, pros=['Groundbreaking special effects', 'Intriguing story with complex themes', 'Well-developed characters', 'Excellent world-building'], cons=['Pacing can be slow in some parts']), cast=[Actor(name='Keanu Reeves', role='Neo'), Actor(name='Laurence Fishburne', role='Morpheus'), Actor(name='Carrie-Anne Moss', role='Trinity')], genre=['Science Fiction', 'Action'], recommend=True)\n", - "BasicReview: MovieReview(title='The Matrix', rating=8.5, pros=['Groundbreaking special effects', 'Intriguing story with complex themes', 'Well-developed characters', 'Excellent world-building'], cons=['Pacing can be slow in some parts'])\n", - "Cast: [Actor(name='Keanu Reeves', role='Neo'), Actor(name='Laurence Fishburne', role='Morpheus'), Actor(name='Carrie-Anne Moss', role='Trinity')]\n" - ] - } - ], - "source": [ - "# test the data class with one level of nesting\n", - "\n", - "reviewer = MovieReviewer(\n", - " model_client=GroqAPIClient(),\n", - " model_kwargs={\"model\": \"llama3-8b-8192\"},\n", - " data_class=DetailedMovieReview\n", - ")\n", - "\n", - "response = reviewer(\"The Matrix\")\n", - "print(f\"DetailedMovieReview: {response.data}\")\n", - "print(f\"BasicReview: {response.data.basic_review}\")\n", - "print(f\"Cast: {response.data.cast}\")" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "DetailedMovieReview: DetailedMovieReview(basic_review=MovieReview(title='The Matrix', rating=9.0, pros=['Innovative special effects and action sequences', 'Thought-provoking storyline', 'Engaging cyberpunk aesthetic', 'Strong performances from the cast', 'Iconic fight choreography'], cons=['Complex narrative that may confuse some viewers', 'Some dated CGI when compared to modern standards']), cast=[Actor(name='Keanu Reeves', role='Neo'), Actor(name='Laurence Fishburne', role='Morpheus'), Actor(name='Carrie-Anne Moss', role='Trinity'), Actor(name='Hugo Weaving', role='Agent Smith')], genre=['Science Fiction', 'Action', 'Adventure'], recommend=True)\n", - "BasicReview: MovieReview(title='The Matrix', 
rating=9.0, pros=['Innovative special effects and action sequences', 'Thought-provoking storyline', 'Engaging cyberpunk aesthetic', 'Strong performances from the cast', 'Iconic fight choreography'], cons=['Complex narrative that may confuse some viewers', 'Some dated CGI when compared to modern standards'])\n", - "Cast: [Actor(name='Keanu Reeves', role='Neo'), Actor(name='Laurence Fishburne', role='Morpheus'), Actor(name='Carrie-Anne Moss', role='Trinity'), Actor(name='Hugo Weaving', role='Agent Smith')]\n" - ] - } - ], - "source": [ - "# try use openai model\n", - "reviewer = MovieReviewer(\n", - " model_client=adal.OpenAIClient(),\n", - " model_kwargs={\"model\": \"gpt-4o\"},\n", - " data_class=DetailedMovieReview\n", - ")\n", - "response = reviewer(\"The Matrix\")\n", - "print(f\"DetailedMovieReview: {response.data}\")\n", - "print(f\"BasicReview: {response.data.basic_review}\")\n", - "print(f\"Cast: {response.data.cast}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We see both models can handle one level of nested dataclass quite well. And the output ordering will follow the ordering specified in __output_fields__" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "metadata": { - "id": "ekr4v8Xg93en" - }, - "outputs": [], - "source": [ - "# 3. second level nested dataclass\n", - "\n", - "@dataclass\n", - "class MovieAnalysis(adal.DataClass):\n", - " review: DetailedMovieReview\n", - " box_office: float = field(\n", - " default=None,\n", - " metadata={\"desc\": \"Box office earnings in millions of dollars\"}\n", - " )\n", - " awards: Dict[str, int] = field(\n", - " default=None,\n", - " metadata={\"desc\": \"Dictionary of award categories and number of wins\"}\n", - " )\n", - "\n", - " __output_fields__ = [\"review\", \"box_office\", \"awards\"]" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "MovieAnalysis: MovieAnalysis(review=DetailedMovieReview(basic_review=MovieReview(title='The Matrix', rating=9.5, pros=['Innovative concept', 'Mind-bending plot', 'Impressive action sequences'], cons=['Some overly complex dialogue', 'Ending leaves room for interpretation']), cast=[Actor(name='Keanu Reeves', role='Neo'), Actor(name='Laurence Fishburne', role='Morpheus'), Actor(name='Carrie-Anne Moss', role='Trinity')], genre=['Action', 'Science Fiction'], recommend=True), box_office=463.5, awards={'Best Visual Effects': 4, 'Best Film Editing': 2, 'Best Sound': 1})\n", - "DetailedMovieReview: DetailedMovieReview(basic_review=MovieReview(title='The Matrix', rating=9.5, pros=['Innovative concept', 'Mind-bending plot', 'Impressive action sequences'], cons=['Some overly complex dialogue', 'Ending leaves room for interpretation']), cast=[Actor(name='Keanu Reeves', role='Neo'), Actor(name='Laurence Fishburne', role='Morpheus'), Actor(name='Carrie-Anne Moss', role='Trinity')], genre=['Action', 'Science Fiction'], recommend=True)\n", - "BasicReview: MovieReview(title='The Matrix', rating=9.5, pros=['Innovative concept', 'Mind-bending plot', 'Impressive action sequences'], cons=['Some overly complex dialogue', 'Ending leaves room for interpretation'])\n", - "Cast: [Actor(name='Keanu Reeves', role='Neo'), Actor(name='Laurence Fishburne', role='Morpheus'), Actor(name='Carrie-Anne Moss', role='Trinity')]\n" - ] - } - ], - "source": [ - "# test the data class with two levels of nested dataclass\n", - "\n", - "# gpt-3.5-turbo model\n", - "\n", - "analysis = 
MovieReviewer(\n", - " model_client=adal.OpenAIClient(),\n", - " model_kwargs={\"model\": \"gpt-3.5-turbo\"},\n", - " data_class=MovieAnalysis\n", - ")\n", - "\n", - "response = analysis(\"The Matrix\")\n", - "print(f\"MovieAnalysis: {response.data}\")\n", - "print(f\"DetailedMovieReview: {response.data.review}\")\n", - "print(f\"BasicReview: {response.data.review.basic_review}\")\n", - "print(f\"Cast: {response.data.review.cast}\")" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "MovieAnalysis: MovieAnalysis(review=DetailedMovieReview(basic_review=MovieReview(title='The Matrix', rating=9.5, pros=['Groundbreaking special effects', 'Thought-provoking themes', 'Innovative storyline', 'Strong performances from the cast'], cons=['Somewhat slow pacing in parts']), cast=[Actor(name='Keanu Reeves', role='Neo'), Actor(name='Laurence Fishburne', role='Morpheus'), Actor(name='Carrie-Anne Moss', role='Trinity')], genre=['Science Fiction', 'Action', 'Adventure'], recommend=True), box_office=463.5, awards={'Academy Awards': 4, 'MTV Movie Awards': 10, 'Saturn Awards': 7})\n", - "DetailedMovieReview: DetailedMovieReview(basic_review=MovieReview(title='The Matrix', rating=9.5, pros=['Groundbreaking special effects', 'Thought-provoking themes', 'Innovative storyline', 'Strong performances from the cast'], cons=['Somewhat slow pacing in parts']), cast=[Actor(name='Keanu Reeves', role='Neo'), Actor(name='Laurence Fishburne', role='Morpheus'), Actor(name='Carrie-Anne Moss', role='Trinity')], genre=['Science Fiction', 'Action', 'Adventure'], recommend=True)\n", - "BasicReview: MovieReview(title='The Matrix', rating=9.5, pros=['Groundbreaking special effects', 'Thought-provoking themes', 'Innovative storyline', 'Strong performances from the cast'], cons=['Somewhat slow pacing in parts'])\n", - "Cast: [Actor(name='Keanu Reeves', role='Neo'), Actor(name='Laurence Fishburne', role='Morpheus'), Actor(name='Carrie-Anne Moss', role='Trinity')]\n" - ] - } - ], - "source": [ - "# test the data class with two levels of nested dataclass\n", - "\n", - "analysis = MovieReviewer(\n", - " model_client=GroqAPIClient(),\n", - " model_kwargs={\"model\": \"llama3-8b-8192\"},\n", - " data_class=MovieAnalysis\n", - ")\n", - "\n", - "response = analysis(\"The Matrix\")\n", - "print(f\"MovieAnalysis: {response.data}\")\n", - "print(f\"DetailedMovieReview: {response.data.review}\")\n", - "print(f\"BasicReview: {response.data.review.basic_review}\")\n", - "print(f\"Cast: {response.data.review.cast}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "pSTrf8_t-DCx" - }, - "source": [ - "### Example 2: Song Review\n", - "Note: Song Review is modified by keeping Example 1 - Movie Review as a reference so that we would know how to use DataClasses for similar purposes" - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "metadata": { - "id": "7g9bUa0q-B6Y" - }, - "outputs": [], - "source": [ - "# 1. 
Basic DataClass with different field types\n", - "@dataclass\n", - "class SongReview(adal.DataClass):\n", - " title: str = field(\n", - " metadata={\"desc\": \"The title of the song\"}\n", - " )\n", - " album: str = field(\n", - " metadata={\"desc\": \"The album of the song\"}\n", - " )\n", - " ranking: int = field(\n", - " metadata={\n", - " \"desc\": \"Billboard peak ranking from 1 to 200\",\n", - " \"min\": 1,\n", - " \"max\": 200\n", - " }\n", - " )\n", - " streaming: Dict[str, int] = field(\n", - " default_factory=list,\n", - " metadata={\"desc\": \"Dict of lastest approximate streaming count in spotify and in youtube. Gives the count in millions\"}\n", - " )\n", - " pros: List[str] = field(\n", - " default_factory=list,\n", - " metadata={\"desc\": \"List of positive points about the song\"}\n", - " )\n", - " cons: List[str] = field(\n", - " default_factory=list,\n", - " metadata={\"desc\": \"List of negative points about the song\"}\n", - " )\n", - "\n", - " __output_fields__ = [\"title\", \"rating\", \"streaming\", \"pros\", \"cons\"]\n" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "metadata": { - "id": "UGhMRZht-HiB" - }, - "outputs": [], - "source": [ - "\n", - "@dataclass\n", - "class Artist(adal.DataClass):\n", - " name: str = field(metadata={\"desc\": \"Artist's full name\"})\n", - " role: str = field(metadata={\"desc\": \"Artist's role in the song\"})" - ] - }, - { - "cell_type": "code", - "execution_count": 28, - "metadata": { - "id": "sfNWgPYN-JAj" - }, - "outputs": [], - "source": [ - "# 2. Nested DataClass example\n", - "\n", - "@dataclass\n", - "class DetailedSongReview(adal.DataClass):\n", - " basic_review: SongReview = field(\n", - " default=SongReview, metadata={\"desc\": \"basic Song review details\"}\n", - " )\n", - " cast: List[Artist] = field(\n", - " default_factory=list,\n", - " metadata={\"desc\": \"List of main singer, lyrisist and musicians in the song\"}\n", - " )\n", - " genre: List[str] = field(\n", - " default_factory=list,\n", - " metadata={\"desc\": \"List of genres for the song\"}\n", - " )\n", - " recommend: bool = field(\n", - " default_factory=str,\n", - " metadata={\"desc\": \"Whether you would recommend this song\"}\n", - " )\n", - "\n", - " __output_fields__ = [\"basic_review\", \"cast\", \"genre\", \"recommend\"]" - ] - }, - { - "cell_type": "code", - "execution_count": 29, - "metadata": { - "id": "HG8rtCd8-K7t" - }, - "outputs": [], - "source": [ - "# 3. two levels of nesting dataclass\n", - "\n", - "# all these fields as we use default, it is optional, so \n", - "# llm might not output that field if they dont have information\n", - "\n", - "@dataclass\n", - "class SongAnalysis(adal.DataClass):\n", - " review: DetailedSongReview = field(\n", - " default=DetailedSongReview, metadata={\"desc\": \"Song review details\"}\n", - " )\n", - " duration: float = field(\n", - " default=None,\n", - " metadata={\"desc\": \"Duration of the song\"}\n", - " )\n", - " awards: Dict[str, int] = field(\n", - " default=None,\n", - " metadata={\"desc\": \"Dictionary of award categories and number of wins\"}\n", - " )\n", - "\n", - " __output_fields__ = [\"review\", \"duration\", \"awards\"]" - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "metadata": { - "id": "v3mNeyz7-MpY" - }, - "outputs": [], - "source": [ - "# Example template for song review\n", - "song_review_template = r\"\"\"\n", - "You are a professional song critic. 
Analyze the given song and provide a detailed review.\n", - "\n", - "{{output_format_str}}\n", - "\n", - "\n", - " Review this song: {{song_title}} \"\"\"\n" - ] - }, - { - "cell_type": "code", - "execution_count": 31, - "metadata": { - "id": "X2eifXOU-OrE" - }, - "outputs": [], - "source": [ - "# Create the SongReviewer component with SongAnalysis data class\n", - "class SongReviewer(adal.Component):\n", - " def __init__(self, model_client: adal.ModelClient, model_kwargs: Dict):\n", - " super().__init__()\n", - " self.additional_structure_prompt = \"Dont use 'type' and 'properties' in output directly give as dict\"\n", - " parser = adal.DataClassParser(\n", - " data_class=SongAnalysis,\n", - " return_data_class=False,\n", - " format_type=\"json\"\n", - " )\n", - " self.generator = adal.Generator(\n", - " model_client=model_client,\n", - " model_kwargs=model_kwargs,\n", - " template=song_review_template,\n", - " prompt_kwargs={\"output_format_str\": parser.get_output_format_str() + self.additional_structure_prompt },\n", - " output_processors=parser,\n", - " )\n", - "\n", - " def call(self, song_title: str):\n", - " return self.generator.call({\"song_title\": song_title})" - ] - }, - { - "cell_type": "code", - "execution_count": 36, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "SongAnalysis: {'review': {'basic_review': {'title': 'Shape of You', 'album': '÷ (Divide)', 'ranking': 7, 'streaming': {'spotify': 4.5, 'youtube': 2.5}, 'pros': ['Catchy beat', 'Catchy melody', 'Funky rhythm', 'Great lyrics'], 'cons': ['Some may find the lyrics objectifying', 'Not typically my cup of tea']}, 'cast': [{'name': 'Ed Sheeran', 'role': 'Lead vocals, songwriting'}], 'genre': ['Pop', 'Dance', 'Electro'], 'recommend': True}, 'duration': 3.53}\n" - ] - } - ], - "source": [ - "analysis = SongReviewer(\n", - " model_client=GroqAPIClient(),\n", - " model_kwargs={\"model\": \"llama3-8b-8192\"},\n", - ")\n", - "\n", - "response = analysis(\"Shape of you\")\n", - "print(f\"SongAnalysis: {response.data}\")\n", - "\n", - "# this time as we set `return_data_class` to False in the parser, we get the output as dict" - ] - }, - { - "cell_type": "code", - "execution_count": 38, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Song Title: Shape of You\n", - "Album: ÷ (Divide)\n", - "Ranking: 7\n", - "- spotify - 4.5 million views\n", - "- youtube - 2.5 million views\n", - "\n", - "Pros:\n", - "- Catchy beat\n", - "- Catchy melody\n", - "- Funky rhythm\n", - "- Great lyrics\n", - "\n", - "Artist's:\n", - "- Ed Sheeran as Lead vocals, songwriting\n", - "\n", - "Genere: \n", - " Pop \n", - " Dance \n", - " Electro \n", - "\n", - "Duration: 3.53 minutes\n" - ] - } - ], - "source": [ - "# Access nested data\n", - "analysis = response.data\n", - "print(f\"Song Title: {analysis['review']['basic_review']['title']}\")\n", - "print(f\"Album: {analysis['review']['basic_review']['album']}\")\n", - "print(f\"Ranking: {analysis['review']['basic_review']['ranking']}\")\n", - "\n", - "for platform, views in analysis['review']['basic_review']['streaming'].items():\n", - " print(f\"- {platform} - {views} million views\")\n", - "print(\"\\nPros:\")\n", - "for pro in analysis['review'][\"basic_review\"][\"pros\"]:\n", - " print(f\"- {pro}\")\n", - "\n", - "print(\"\\nArtist's:\")\n", - "for actor in analysis['review'][\"cast\"]:\n", - " print(f\"- {actor['name']} as {actor['role']}\")\n", - "\n", - "if analysis['review']['genre']:\n", - " 
print(f\"\\nGenere: \")\n", - " for genre in analysis['review']['genre']:\n", - " print(f\" {genre} \")\n", - "\n", - "if analysis['duration']:\n", - " print(f\"\\nDuration: {analysis['duration']} minutes\")\n", - "\n", - "if hasattr(analysis, 'awards') and analysis['awards']:\n", - " print(\"\\nAwards:\")\n", - " for category, count in analysis['awards'].items():\n", - " print(f\"- {category}: {count}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "TODOs:\n", - "1. Add `JsonOutputParser` and `YamlOutputParser` to this notebook." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "BLAF5qTEmoyW" - }, - "source": [ - "# Issues and feedback\n", - "\n", - "If you encounter any issues, please report them here: [GitHub Issues](https://github.com/SylphAI-Inc/LightRAG/issues).\n", - "\n", - "For feedback, you can use either the [GitHub discussions](https://github.com/SylphAI-Inc/LightRAG/discussions) or [Discord](https://discord.gg/ezzszrRZvT)." - ] - } - ], - "metadata": { + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "hGLYrUwBmvUD" + }, + "source": [ + "\n", + " \"Open\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "gHK6HFngl6iP" + }, + "source": [ + "# 🤗 Welcome to AdalFlow!\n", + "## The library to build & auto-optimize any LLM task pipelines\n", + "\n", + "Thanks for trying us out, we're here to provide you with the best LLM application development experience you can dream of 😊 any questions or concerns you may have, [come talk to us on discord,](https://discord.gg/ezzszrRZvT) we're always here to help! ⭐ Star us on Github ⭐\n", + "\n", + "\n", + "# Quick Links\n", + "\n", + "Github repo: https://github.com/SylphAI-Inc/AdalFlow\n", + "\n", + "Full Tutorials: https://adalflow.sylph.ai/index.html#.\n", + "\n", + "Deep dive on each API: check out the [developer notes](https://adalflow.sylph.ai/tutorials/index.html).\n", + "\n", + "Common use cases along with the auto-optimization: check out [Use cases](https://adalflow.sylph.ai/use_cases/index.html).\n", + "\n", + "# Author\n", + "\n", + "This notebook was created by community contributor [Ajith](https://github.com/ajithvcoder).\n", + "\n", + "# Outline\n", + "\n", + "This is a quick introduction of what AdalFlow is capable of. We will cover:\n", + "\n", + "* How to use `DataClass` with `DataClassParser`.\n", + "* How to do nested dataclass, we will test both one and two levels of nesting.\n", + "\n", + "**Next: Try our [auto-optimization](https://colab.research.google.com/drive/1n3mHUWekTEYHiBdYBTw43TKlPN41A9za?usp=sharing)**\n", + "\n", + "\n", + "# Installation\n", + "\n", + "1. Use `pip` to install the `adalflow` Python package. We will need `openai` and `groq`from the extra packages.\n", + "\n", + " ```bash\n", + " pip install adalflow[openai,groq]\n", + " ```\n", + "2. 
Set up the `openai` and `groq` API keys in the environment variables"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "nqe-vxB1BCux"
+   },
+   "source": [
+    "### Install adalflow"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "id": "ZaaevxNH9JMQ"
+   },
+   "outputs": [],
+   "source": [
+    "# Install adalflow with necessary dependencies\n",
+    "from IPython.display import clear_output\n",
+    "\n",
+    "!pip install -U adalflow[openai,groq]\n",
+    "\n",
+    "clear_output()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "NGE70aZ8BLuf"
+   },
+   "source": [
+    "### Set Environment Variables\n",
+    "\n",
+    "Note: Enter your API keys in the cell below."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 23,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "j2xmGr_99YDq",
+    "outputId": "c3d1e0b7-9072-412e-fed1-4578404357be"
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Overwriting .env\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%writefile .env\n",
+    "\n",
+    "OPENAI_API_KEY=\"PASTE-OPENAI_API_KEY_HERE\"\n",
+    "GROQ_API_KEY=\"PASTE-GROQ_API_KEY-HERE\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "API keys have been set.\n"
+     ]
+    }
+   ],
+   "source": [
+    "# or more securely\n",
+    "\n",
+    "import os\n",
+    "\n",
+    "from getpass import getpass\n",
+    "\n",
+    "# Prompt the user to enter their API keys securely\n",
+    "groq_api_key = getpass(\"Please enter your GROQ API key: \")\n",
+    "openai_api_key = getpass(\"Please enter your OpenAI API key: \")\n",
+    "\n",
+    "\n",
+    "# Set environment variables\n",
+    "os.environ['GROQ_API_KEY'] = groq_api_key\n",
+    "os.environ['OPENAI_API_KEY'] = openai_api_key\n",
+    "\n",
+    "print(\"API keys have been set.\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "ZxBkm77uBZpl"
+   },
+   "source": [
+    "### Import necessary libraries"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "id": "wOAiKg899Z2u"
+   },
+   "outputs": [],
+   "source": [
+    "# Import required libraries\n",
+    "from dataclasses import dataclass, field\n",
+    "from typing import List, Dict\n",
+    "import adalflow as adal\n",
+    "from adalflow.components.model_client import GroqAPIClient\n",
+    "from adalflow.utils import setup_env"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "'0.2.4'"
+      ]
+     },
+     "execution_count": 2,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "adal.__version__"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "bTzgyp6S9bnH"
+   },
+   "outputs": [],
+   "source": [
+    "# Load environment variables - make sure OPENAI_API_KEY is set in a .env file in the current folder\n",
+    "setup_env(\".env\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "MBW5viOG9hM8"
+   },
+   "source": [
+    "### Basic Vanilla Example"
+   ]
+  },
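+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Before the full example, here is a minimal sketch of the core pattern: define a `DataClass`, hand it to a `DataClassParser`, and inspect the format instructions the parser will inject into the prompt. This cell is illustrative only; `TinyOutput` and `tiny_parser` are made-up names, and the cell is not executed in this notebook."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Minimal sketch: inspect the output-format instructions derived from a DataClass\n",
+    "@dataclass\n",
+    "class TinyOutput(adal.DataClass):\n",
+    "    answer: str = field(metadata={\"desc\": \"The answer in one sentence.\"})\n",
+    "\n",
+    "    __output_fields__ = [\"answer\"]\n",
+    "\n",
+    "tiny_parser = adal.DataClassParser(data_class=TinyOutput, return_data_class=True)\n",
+    "print(tiny_parser.get_output_format_str())  # the JSON schema + formatting rules sent to the LLM"
+   ]
+  },
+  {
+   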
"cell_type": "code", + "execution_count": 3, + "metadata": { + "id": "YA4pAIek9ewc" + }, + "outputs": [], + "source": [ + "# Define the output structure using dataclass\n", + "@dataclass\n", + "class BasicQAOutput(adal.DataClass):\n", + " explanation: str = field(\n", + " metadata={\"desc\": \"A brief explanation of the concept in one sentence.\"}\n", + " )\n", + " example: str = field(\n", + " metadata={\"desc\": \"An example of the concept in a sentence.\"}\n", + " )\n", + " # Control output fields order\n", + " __output_fields__ = [\"explanation\", \"example\"]\n", + "\n", + "# Define the template using jinja2 syntax\n", + "qa_template = r\"\"\"\n", + "You are a helpful assistant.\n", + "\n", + "{{output_format_str}}\n", + "\n", + "\n", + " {{input_str}} \"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "id": "x4__jnbP9luN" + }, + "outputs": [], + "source": [ + "# Define the QA component\n", + "class QA(adal.Component):\n", + " def __init__(self, model_client: adal.ModelClient, model_kwargs: Dict):\n", + " super().__init__()\n", + "\n", + " # Initialize the parser with the output dataclass\n", + " parser = adal.DataClassParser(data_class=BasicQAOutput, return_data_class=True)\n", + "\n", + " # Set up the generator with model, template, and parser\n", + " self.generator = adal.Generator(\n", + " model_client=model_client,\n", + " model_kwargs=model_kwargs,\n", + " template=qa_template,\n", + " prompt_kwargs={\"output_format_str\": parser.get_output_format_str()},\n", + " output_processors=parser,\n", + " )\n", + "\n", + " def call(self, query: str):\n", + " \"\"\"Synchronous call to generate response\"\"\"\n", + " return self.generator.call({\"input_str\": query})\n", + "\n", + " async def acall(self, query: str):\n", + " \"\"\"Asynchronous call to generate response\"\"\"\n", + " return await self.generator.acall({\"input_str\": query})\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "id": "TVi3rGvs9nte" + }, + "outputs": [], + "source": [ + "# Example usage\n", + "def run_basic_example():\n", + " # Instantiate the QA class with Groq model\n", + " qa = QA(\n", + " model_client=GroqAPIClient(),\n", + " model_kwargs={\"model\": \"llama3-8b-8192\"},\n", + " )\n", + "\n", + " # Print the QA instance details\n", + " print(qa)\n", + "\n", + " # Test the QA system\n", + " response = qa(\"What is LLM?\")\n", + " print(\"\\nResponse:\")\n", + " print(response)\n", + " print(f\"BasicQAOutput: {response.data}\")\n", + " print(f\"Explanation: {response.data.explanation}\")\n", + " print(f\"Example: {response.data.example}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "QA(\n", + " (generator): Generator(\n", + " model_kwargs={'model': 'llama3-8b-8192'}, trainable_prompt_kwargs=[]\n", + " (prompt): Prompt(\n", + " template: \n", + " You are a helpful assistant.\n", + " \n", + " {{output_format_str}}\n", + " \n", + " \n", + " {{input_str}} , prompt_kwargs: {'output_format_str': 'Your output should be formatted as a standard JSON instance with the following schema:\\n```\\n{\\n \"explanation\": \"A brief explanation of the concept in one sentence. (str) (required)\",\\n \"example\": \"An example of the concept in a sentence. (str) (required)\"\\n}\\n```\\n-Make sure to always enclose the JSON output in triple backticks (```). 
Please do not add anything other than valid JSON output!\\n-Use double quotes for the keys and string values.\\n-DO NOT mistaken the \"properties\" and \"type\" in the schema as the actual fields in the JSON output.\\n-Follow the JSON formatting conventions.'}, prompt_variables: ['input_str', 'output_format_str']\n", + " )\n", + " (model_client): GroqAPIClient()\n", + " (output_processors): DataClassParser(\n", + " data_class=BasicQAOutput, format_type=json, return_data_class=True, input_fields=[], output_fields=['explanation', 'example']\n", + " (_output_processor): JsonParser()\n", + " (output_format_prompt): Prompt(\n", + " template: Your output should be formatted as a standard JSON instance with the following schema:\n", + " ```\n", + " {{schema}}\n", + " ```\n", + " -Make sure to always enclose the JSON output in triple backticks (```). Please do not add anything other than valid JSON output!\n", + " -Use double quotes for the keys and string values.\n", + " -DO NOT mistaken the \"properties\" and \"type\" in the schema as the actual fields in the JSON output.\n", + " -Follow the JSON formatting conventions., prompt_variables: ['schema']\n", + " )\n", + " )\n", + " )\n", + ")\n", + "\n", + "Response:\n", + "GeneratorOutput(id=None, data=BasicQAOutput(explanation='Large Language Model (LLM) is a type of artificial intelligence designed to process and generate human-like language', example='The new LLM-powered chatbot was able to understand and respond to complex user queries with high accuracy'), error=None, usage=CompletionUsage(completion_tokens=60, prompt_tokens=174, total_tokens=234), raw_response='```\\n{\\n \"explanation\": \"Large Language Model (LLM) is a type of artificial intelligence designed to process and generate human-like language\",\\n \"example\": \"The new LLM-powered chatbot was able to understand and respond to complex user queries with high accuracy\"\\n}\\n```', metadata=None)\n", + "BasicQAOutput: BasicQAOutput(explanation='Large Language Model (LLM) is a type of artificial intelligence designed to process and generate human-like language', example='The new LLM-powered chatbot was able to understand and respond to complex user queries with high accuracy')\n", + "Explanation: Large Language Model (LLM) is a type of artificial intelligence designed to process and generate human-like language\n", + "Example: The new LLM-powered chatbot was able to understand and respond to complex user queries with high accuracy\n" + ] + } + ], + "source": [ + "run_basic_example()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1n7edLQ19ql8" + }, + "source": [ + "### Example 1 - Movie analysis data class" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "id": "5Arp4-Dq9u49" + }, + "outputs": [], + "source": [ + "# 1. 
Basic DataClass with different field types\n",
+    "@dataclass\n",
+    "class MovieReview(adal.DataClass):\n",
+    "    title: str = field(\n",
+    "        metadata={\"desc\": \"The title of the movie\"}\n",
+    "    )\n",
+    "    rating: float = field(\n",
+    "        metadata={\n",
+    "            \"desc\": \"Rating from 1.0 to 10.0\",\n",
+    "            \"min\": 1.0,\n",
+    "            \"max\": 10.0\n",
+    "        }\n",
+    "    )\n",
+    "    pros: List[str] = field(\n",
+    "        default_factory=list,\n",
+    "        metadata={\"desc\": \"List of positive points about the movie\"}\n",
+    "    )\n",
+    "    cons: List[str] = field(\n",
+    "        default_factory=list,\n",
+    "        metadata={\"desc\": \"List of negative points about the movie\"}\n",
+    "    )\n",
+    "\n",
+    "    __output_fields__ = [\"title\", \"rating\", \"pros\", \"cons\"]\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "metadata": {
+    "id": "VLbRUzXg9yP0"
+   },
+   "outputs": [],
+   "source": [
+    "\n",
+    "@dataclass\n",
+    "class Actor(adal.DataClass):\n",
+    "    name: str = field(metadata={\"desc\": \"Actor's full name\"})\n",
+    "    role: str = field(metadata={\"desc\": \"Character name in the movie\"})"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "metadata": {
+    "id": "7MUcu0tk91l4"
+   },
+   "outputs": [],
+   "source": [
+    "# 2. Nested DataClass example\n",
+    "\n",
+    "# Have both MovieReview and Actor nested in DetailedMovieReview\n",
+    "\n",
+    "@dataclass\n",
+    "class DetailedMovieReview(adal.DataClass):\n",
+    "    basic_review: MovieReview\n",
+    "    cast: List[Actor] = field(\n",
+    "        default_factory=list,\n",
+    "        metadata={\"desc\": \"List of main actors in the movie\"}\n",
+    "    )\n",
+    "    genre: List[str] = field(\n",
+    "        default_factory=list,\n",
+    "        metadata={\"desc\": \"List of genres for the movie\"}\n",
+    "    )\n",
+    "    recommend: bool = field(\n",
+    "        default=False,\n",
+    "        metadata={\"desc\": \"Whether you would recommend this movie\"}\n",
+    "    )\n",
+    "\n",
+    "    __output_fields__ = [\"basic_review\", \"cast\", \"genre\", \"recommend\"]"
+   ]
+  },
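+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To make the nesting concrete, here is a minimal sketch (not executed) of the structure the model is asked to fill in; `sample_review` and all of its values are illustrative placeholders, not real model output."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch only: a hand-built DetailedMovieReview showing the nested shape\n",
+    "# the parser expects back from the model.\n",
+    "sample_review = DetailedMovieReview(\n",
+    "    basic_review=MovieReview(\n",
+    "        title=\"Example Movie\",\n",
+    "        rating=7.5,\n",
+    "        pros=[\"placeholder pro\"],\n",
+    "        cons=[\"placeholder con\"],\n",
+    "    ),\n",
+    "    cast=[Actor(name=\"Jane Doe\", role=\"Lead\")],\n",
+    "    genre=[\"Drama\"],\n",
+    "    recommend=True,\n",
+    ")\n",
+    "print(sample_review)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Example template for movie review\n",
+    "movie_review_template = r\"\"\"\n",
+    "You are a professional movie critic. 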
Analyze the given movie and provide a detailed review.\n", + "\n", + "{{output_format_str}}\n", + "\n", + "\n", + " Review this movie: {{movie_title}} \"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "# Create the MovieReviewer component with MovieAnalysis data class\n", + "class MovieReviewer(adal.Component):\n", + " def __init__(self, model_client: adal.ModelClient, model_kwargs: Dict, data_class: adal.DataClass):\n", + " super().__init__()\n", + " self.additional_structure_prompt = \"Dont use 'type' and 'properties' in output directly give as dict\"\n", + " parser = adal.DataClassParser(\n", + " data_class=data_class,\n", + " return_data_class=True\n", + " )\n", + " self.generator = adal.Generator(\n", + " model_client=model_client,\n", + " model_kwargs=model_kwargs,\n", + " template=movie_review_template,\n", + " prompt_kwargs={\"output_format_str\": parser.get_output_format_str() + self.additional_structure_prompt},\n", + " output_processors=parser,\n", + " )\n", + "\n", + " def call(self, movie_title: str):\n", + " return self.generator.call({\"movie_title\": movie_title})" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "DetailedMovieReview: DetailedMovieReview(basic_review=MovieReview(title='The Matrix', rating=8.5, pros=['Groundbreaking special effects', 'Intriguing story with complex themes', 'Well-developed characters', 'Excellent world-building'], cons=['Pacing can be slow in some parts']), cast=[Actor(name='Keanu Reeves', role='Neo'), Actor(name='Laurence Fishburne', role='Morpheus'), Actor(name='Carrie-Anne Moss', role='Trinity')], genre=['Science Fiction', 'Action'], recommend=True)\n", + "BasicReview: MovieReview(title='The Matrix', rating=8.5, pros=['Groundbreaking special effects', 'Intriguing story with complex themes', 'Well-developed characters', 'Excellent world-building'], cons=['Pacing can be slow in some parts'])\n", + "Cast: [Actor(name='Keanu Reeves', role='Neo'), Actor(name='Laurence Fishburne', role='Morpheus'), Actor(name='Carrie-Anne Moss', role='Trinity')]\n" + ] + } + ], + "source": [ + "# test the data class with one level of nesting\n", + "\n", + "reviewer = MovieReviewer(\n", + " model_client=GroqAPIClient(),\n", + " model_kwargs={\"model\": \"llama3-8b-8192\"},\n", + " data_class=DetailedMovieReview\n", + ")\n", + "\n", + "response = reviewer(\"The Matrix\")\n", + "print(f\"DetailedMovieReview: {response.data}\")\n", + "print(f\"BasicReview: {response.data.basic_review}\")\n", + "print(f\"Cast: {response.data.cast}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "DetailedMovieReview: DetailedMovieReview(basic_review=MovieReview(title='The Matrix', rating=9.0, pros=['Innovative special effects and action sequences', 'Thought-provoking storyline', 'Engaging cyberpunk aesthetic', 'Strong performances from the cast', 'Iconic fight choreography'], cons=['Complex narrative that may confuse some viewers', 'Some dated CGI when compared to modern standards']), cast=[Actor(name='Keanu Reeves', role='Neo'), Actor(name='Laurence Fishburne', role='Morpheus'), Actor(name='Carrie-Anne Moss', role='Trinity'), Actor(name='Hugo Weaving', role='Agent Smith')], genre=['Science Fiction', 'Action', 'Adventure'], recommend=True)\n", + "BasicReview: MovieReview(title='The Matrix', 
rating=9.0, pros=['Innovative special effects and action sequences', 'Thought-provoking storyline', 'Engaging cyberpunk aesthetic', 'Strong performances from the cast', 'Iconic fight choreography'], cons=['Complex narrative that may confuse some viewers', 'Some dated CGI when compared to modern standards'])\n",
+      "Cast: [Actor(name='Keanu Reeves', role='Neo'), Actor(name='Laurence Fishburne', role='Morpheus'), Actor(name='Carrie-Anne Moss', role='Trinity'), Actor(name='Hugo Weaving', role='Agent Smith')]\n"
+     ]
+    }
+   ],
+   "source": [
+    "# try the OpenAI model\n",
+    "reviewer = MovieReviewer(\n",
+    "    model_client=adal.OpenAIClient(),\n",
+    "    model_kwargs={\"model\": \"gpt-4o\"},\n",
+    "    data_class=DetailedMovieReview\n",
+    ")\n",
+    "response = reviewer(\"The Matrix\")\n",
+    "print(f\"DetailedMovieReview: {response.data}\")\n",
+    "print(f\"BasicReview: {response.data.basic_review}\")\n",
+    "print(f\"Cast: {response.data.cast}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We see that both models handle one level of nested dataclasses quite well, and the output ordering follows the order specified in `__output_fields__`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 20,
+   "metadata": {
+    "id": "ekr4v8Xg93en"
+   },
+   "outputs": [],
+   "source": [
+    "# 3. Second-level nested dataclass\n",
+    "\n",
+    "@dataclass\n",
+    "class MovieAnalysis(adal.DataClass):\n",
+    "    review: DetailedMovieReview\n",
+    "    box_office: float = field(\n",
+    "        default=None,\n",
+    "        metadata={\"desc\": \"Box office earnings in millions of dollars\"}\n",
+    "    )\n",
+    "    awards: Dict[str, int] = field(\n",
+    "        default=None,\n",
+    "        metadata={\"desc\": \"Dictionary of award categories and number of wins\"}\n",
+    "    )\n",
+    "\n",
+    "    __output_fields__ = [\"review\", \"box_office\", \"awards\"]"
+   ]
+  },
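+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Since `box_office` and `awards` default to `None`, they are optional and a model may omit them. Below is a small defensive-access sketch (illustrative only; `summarize_analysis` is a made-up helper, not part of AdalFlow):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch: guard optional fields before using them.\n",
+    "def summarize_analysis(analysis: MovieAnalysis) -> None:\n",
+    "    if analysis.box_office is not None:\n",
+    "        print(f\"Box office: ${analysis.box_office}M\")\n",
+    "    if analysis.awards:\n",
+    "        for category, wins in analysis.awards.items():\n",
+    "            print(f\"- {category}: {wins}\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 25,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "MovieAnalysis: MovieAnalysis(review=DetailedMovieReview(basic_review=MovieReview(title='The Matrix', rating=9.5, pros=['Innovative concept', 'Mind-bending plot', 'Impressive action sequences'], cons=['Some overly complex dialogue', 'Ending leaves room for interpretation']), cast=[Actor(name='Keanu Reeves', role='Neo'), Actor(name='Laurence Fishburne', role='Morpheus'), Actor(name='Carrie-Anne Moss', role='Trinity')], genre=['Action', 'Science Fiction'], recommend=True), box_office=463.5, awards={'Best Visual Effects': 4, 'Best Film Editing': 2, 'Best Sound': 1})\n",
+      "DetailedMovieReview: DetailedMovieReview(basic_review=MovieReview(title='The Matrix', rating=9.5, pros=['Innovative concept', 'Mind-bending plot', 'Impressive action sequences'], cons=['Some overly complex dialogue', 'Ending leaves room for interpretation']), cast=[Actor(name='Keanu Reeves', role='Neo'), Actor(name='Laurence Fishburne', role='Morpheus'), Actor(name='Carrie-Anne Moss', role='Trinity')], genre=['Action', 'Science Fiction'], recommend=True)\n",
+      "BasicReview: MovieReview(title='The Matrix', rating=9.5, pros=['Innovative concept', 'Mind-bending plot', 'Impressive action sequences'], cons=['Some overly complex dialogue', 'Ending leaves room for interpretation'])\n",
+      "Cast: [Actor(name='Keanu Reeves', role='Neo'), Actor(name='Laurence Fishburne', role='Morpheus'), Actor(name='Carrie-Anne Moss', role='Trinity')]\n"
+     ]
+    }
+   ],
+   "source": [
+    "# test the data class with two levels of nested dataclass\n",
+    "\n",
+    "# gpt-3.5-turbo model\n",
+    "\n",
+    "analysis = 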
MovieReviewer(\n", + " model_client=adal.OpenAIClient(),\n", + " model_kwargs={\"model\": \"gpt-3.5-turbo\"},\n", + " data_class=MovieAnalysis\n", + ")\n", + "\n", + "response = analysis(\"The Matrix\")\n", + "print(f\"MovieAnalysis: {response.data}\")\n", + "print(f\"DetailedMovieReview: {response.data.review}\")\n", + "print(f\"BasicReview: {response.data.review.basic_review}\")\n", + "print(f\"Cast: {response.data.review.cast}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "MovieAnalysis: MovieAnalysis(review=DetailedMovieReview(basic_review=MovieReview(title='The Matrix', rating=9.5, pros=['Groundbreaking special effects', 'Thought-provoking themes', 'Innovative storyline', 'Strong performances from the cast'], cons=['Somewhat slow pacing in parts']), cast=[Actor(name='Keanu Reeves', role='Neo'), Actor(name='Laurence Fishburne', role='Morpheus'), Actor(name='Carrie-Anne Moss', role='Trinity')], genre=['Science Fiction', 'Action', 'Adventure'], recommend=True), box_office=463.5, awards={'Academy Awards': 4, 'MTV Movie Awards': 10, 'Saturn Awards': 7})\n", + "DetailedMovieReview: DetailedMovieReview(basic_review=MovieReview(title='The Matrix', rating=9.5, pros=['Groundbreaking special effects', 'Thought-provoking themes', 'Innovative storyline', 'Strong performances from the cast'], cons=['Somewhat slow pacing in parts']), cast=[Actor(name='Keanu Reeves', role='Neo'), Actor(name='Laurence Fishburne', role='Morpheus'), Actor(name='Carrie-Anne Moss', role='Trinity')], genre=['Science Fiction', 'Action', 'Adventure'], recommend=True)\n", + "BasicReview: MovieReview(title='The Matrix', rating=9.5, pros=['Groundbreaking special effects', 'Thought-provoking themes', 'Innovative storyline', 'Strong performances from the cast'], cons=['Somewhat slow pacing in parts'])\n", + "Cast: [Actor(name='Keanu Reeves', role='Neo'), Actor(name='Laurence Fishburne', role='Morpheus'), Actor(name='Carrie-Anne Moss', role='Trinity')]\n" + ] + } + ], + "source": [ + "# test the data class with two levels of nested dataclass\n", + "\n", + "analysis = MovieReviewer(\n", + " model_client=GroqAPIClient(),\n", + " model_kwargs={\"model\": \"llama3-8b-8192\"},\n", + " data_class=MovieAnalysis\n", + ")\n", + "\n", + "response = analysis(\"The Matrix\")\n", + "print(f\"MovieAnalysis: {response.data}\")\n", + "print(f\"DetailedMovieReview: {response.data.review}\")\n", + "print(f\"BasicReview: {response.data.review.basic_review}\")\n", + "print(f\"Cast: {response.data.review.cast}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "pSTrf8_t-DCx" + }, + "source": [ + "### Example 2: Song Review\n", + "Note: Song Review is modified by keeping Example 1 - Movie Review as a reference so that we would know how to use DataClasses for similar purposes" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": { + "id": "7g9bUa0q-B6Y" + }, + "outputs": [], + "source": [ + "# 1. 
Basic DataClass with different field types\n", + "@dataclass\n", + "class SongReview(adal.DataClass):\n", + " title: str = field(\n", + " metadata={\"desc\": \"The title of the song\"}\n", + " )\n", + " album: str = field(\n", + " metadata={\"desc\": \"The album of the song\"}\n", + " )\n", + " ranking: int = field(\n", + " metadata={\n", + " \"desc\": \"Billboard peak ranking from 1 to 200\",\n", + " \"min\": 1,\n", + " \"max\": 200\n", + " }\n", + " )\n", + " streaming: Dict[str, int] = field(\n", + " default_factory=dict,\n", + " metadata={\"desc\": \"Dict of the latest approximate streaming counts on Spotify and YouTube, in millions\"}\n", + " )\n", + " pros: List[str] = field(\n", + " default_factory=list,\n", + " metadata={\"desc\": \"List of positive points about the song\"}\n", + " )\n", + " cons: List[str] = field(\n", + " default_factory=list,\n", + " metadata={\"desc\": \"List of negative points about the song\"}\n", + " )\n", + "\n", + " __output_fields__ = [\"title\", \"album\", \"ranking\", \"streaming\", \"pros\", \"cons\"]\n" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": { + "id": "UGhMRZht-HiB" + }, + "outputs": [], + "source": [ + "\n", + "@dataclass\n", + "class Artist(adal.DataClass):\n", + " name: str = field(metadata={\"desc\": \"Artist's full name\"})\n", + " role: str = field(metadata={\"desc\": \"Artist's role in the song\"})" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": { + "id": "sfNWgPYN-JAj" + }, + "outputs": [], + "source": [ + "# 2. Nested DataClass example\n", + "\n", + "@dataclass\n", + "class DetailedSongReview(adal.DataClass):\n", + " basic_review: SongReview = field(\n", + " default=None, metadata={\"desc\": \"basic song review details\"}\n", + " )\n", + " cast: List[Artist] = field(\n", + " default_factory=list,\n", + " metadata={\"desc\": \"List of the main singer, lyricist, and musicians on the song\"}\n", + " )\n", + " genre: List[str] = field(\n", + " default_factory=list,\n", + " metadata={\"desc\": \"List of genres for the song\"}\n", + " )\n", + " recommend: bool = field(\n", + " default=False,\n", + " metadata={\"desc\": \"Whether you would recommend this song\"}\n", + " )\n", + "\n", + " __output_fields__ = [\"basic_review\", \"cast\", \"genre\", \"recommend\"]" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": { + "id": "HG8rtCd8-K7t" + }, + "outputs": [], + "source": [ + "# 3. two levels of nested dataclasses\n", + "\n", + "# since all these fields have defaults, they are optional, so the\n", + "# LLM may omit a field if it lacks the information\n", + "\n", + "@dataclass\n", + "class SongAnalysis(adal.DataClass):\n", + " review: DetailedSongReview = field(\n", + " default=None, metadata={\"desc\": \"Song review details\"}\n", + " )\n", + " duration: float = field(\n", + " default=None,\n", + " metadata={\"desc\": \"Duration of the song in minutes\"}\n", + " )\n", + " awards: Dict[str, int] = field(\n", + " default=None,\n", + " metadata={\"desc\": \"Dictionary of award categories and number of wins\"}\n", + " )\n", + "\n", + " __output_fields__ = [\"review\", \"duration\", \"awards\"]" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": { + "id": "v3mNeyz7-MpY" + }, + "outputs": [], + "source": [ + "# Example template for song review\n", + "song_review_template = r\"\"\"\n", + "You are a professional song critic. 
Analyze the given song and provide a detailed review.\n", + "\n", + "{{output_format_str}}\n", + "\n", + "\n", + " Review this song: {{song_title}} \"\"\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": { + "id": "X2eifXOU-OrE" + }, + "outputs": [], + "source": [ + "# Create the SongReviewer component with SongAnalysis data class\n", + "class SongReviewer(adal.Component):\n", + " def __init__(self, model_client: adal.ModelClient, model_kwargs: Dict):\n", + " super().__init__()\n", + " self.additional_structure_prompt = \"Don't use 'type' and 'properties' in the output; give the values directly as a dict\"\n", + " parser = adal.DataClassParser(\n", + " data_class=SongAnalysis,\n", + " return_data_class=False,\n", + " format_type=\"json\"\n", + " )\n", + " self.generator = adal.Generator(\n", + " model_client=model_client,\n", + " model_kwargs=model_kwargs,\n", + " template=song_review_template,\n", + " prompt_kwargs={\"output_format_str\": parser.get_output_format_str() + self.additional_structure_prompt },\n", + " output_processors=parser,\n", + " )\n", + "\n", + " def call(self, song_title: str):\n", + " return self.generator.call({\"song_title\": song_title})" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "SongAnalysis: {'review': {'basic_review': {'title': 'Shape of You', 'album': '÷ (Divide)', 'ranking': 7, 'streaming': {'spotify': 4.5, 'youtube': 2.5}, 'pros': ['Catchy beat', 'Catchy melody', 'Funky rhythm', 'Great lyrics'], 'cons': ['Some may find the lyrics objectifying', 'Not typically my cup of tea']}, 'cast': [{'name': 'Ed Sheeran', 'role': 'Lead vocals, songwriting'}], 'genre': ['Pop', 'Dance', 'Electro'], 'recommend': True}, 'duration': 3.53}\n" + ] + } + ], + "source": [ + "analysis = SongReviewer(\n", + " model_client=GroqAPIClient(),\n", + " model_kwargs={\"model\": \"llama3-8b-8192\"},\n", + ")\n", + "\n", + "response = analysis(\"Shape of You\")\n", + "print(f\"SongAnalysis: {response.data}\")\n", + "\n", + "# since we set `return_data_class` to False in the parser, we get the output as a dict this time" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Song Title: Shape of You\n", + "Album: ÷ (Divide)\n", + "Ranking: 7\n", + "- spotify - 4.5 million views\n", + "- youtube - 2.5 million views\n", + "\n", + "Pros:\n", + "- Catchy beat\n", + "- Catchy melody\n", + "- Funky rhythm\n", + "- Great lyrics\n", + "\n", + "Artists:\n", + "- Ed Sheeran as Lead vocals, songwriting\n", + "\n", + "Genre: \n", + " Pop \n", + " Dance \n", + " Electro \n", + "\n", + "Duration: 3.53 minutes\n" + ] + } + ], + "source": [ + "# Access nested data\n", + "analysis = response.data\n", + "print(f\"Song Title: {analysis['review']['basic_review']['title']}\")\n", + "print(f\"Album: {analysis['review']['basic_review']['album']}\")\n", + "print(f\"Ranking: {analysis['review']['basic_review']['ranking']}\")\n", + "\n", + "for platform, views in analysis['review']['basic_review']['streaming'].items():\n", + " print(f\"- {platform} - {views} million views\")\n", + "print(\"\\nPros:\")\n", + "for pro in analysis['review'][\"basic_review\"][\"pros\"]:\n", + " print(f\"- {pro}\")\n", + "\n", + "print(\"\\nArtists:\")\n", + "for artist in analysis['review'][\"cast\"]:\n", + " print(f\"- {artist['name']} as {artist['role']}\")\n", + "\n", + "if analysis['review']['genre']:\n", + " 
print(\"\\nGenere: \")\n", + " for genre in analysis['review']['genre']:\n", + " print(f\" {genre} \")\n", + "\n", + "if analysis['duration']:\n", + " print(f\"\\nDuration: {analysis['duration']} minutes\")\n", + "\n", + "if hasattr(analysis, 'awards') and analysis['awards']:\n", + " print(\"\\nAwards:\")\n", + " for category, count in analysis['awards'].items():\n", + " print(f\"- {category}: {count}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "TODOs:\n", + "1. Add `JsonOutputParser` and `YamlOutputParser` to this notebook." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BLAF5qTEmoyW" + }, + "source": [ + "# Issues and feedback\n", + "\n", + "If you encounter any issues, please report them here: [GitHub Issues](https://github.com/SylphAI-Inc/LightRAG/issues).\n", + "\n", + "For feedback, you can use either the [GitHub discussions](https://github.com/SylphAI-Inc/LightRAG/discussions) or [Discord](https://discord.gg/ezzszrRZvT)." + ] + } + ], + "metadata": { + "colab": { + "collapsed_sections": [ + "nqe-vxB1BCux", + "NGE70aZ8BLuf" + ], + "provenance": [] + }, + "kernelspec": { + "display_name": "my-project-kernel", + "language": "python", + "name": "my-project-kernel" }, - "nbformat": 4, - "nbformat_minor": 0 + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.4" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } diff --git a/pyproject.toml b/pyproject.toml index 1f541368..abdd799c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -63,5 +63,9 @@ line-length = 88 target-version = ["py311"] [tool.ruff] -extend-ignore = ["E402"] # Ignore module-level import issues +lint.extend-ignore = [ + "E402", # Ignore module-level import issues + "E731", + "UP007", # Wants | over Union, which breaks 3.8 +] line-length = 88 diff --git a/tutorials/component.ipynb b/tutorials/component.ipynb index 17e371a4..ccfe51e1 100644 --- a/tutorials/component.ipynb +++ b/tutorials/component.ipynb @@ -6,11 +6,8 @@ "metadata": {}, "outputs": [], "source": [ - "import re\n", "from adalflow.core import Component, Generator\n", - "from adalflow.components.model_client import OpenAIClient\n", - "from adalflow.components.model_client import GroqAPIClient\n", - "from adalflow.utils import setup_env # make sure you have a .env file with OPENAI_API_KEY and GROQ_API_KEY" + "from adalflow.components.model_client import OpenAIClient" ] }, { @@ -74,10 +71,12 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": {}, "outputs": [], - "source": [] + "source": [ + "doc = DocQA()" + ] }, { "cell_type": "code", @@ -235,7 +234,7 @@ "save_pickle(states, \"doc.pkl\")\n", "\n", "# load the serialized states from a file\n", - "from adalflow.utils.file_io import load_pickle, load_json\n", + "from adalflow.utils.file_io import load_pickle\n", "states_loaded = load_pickle(\"doc.pkl\")\n", "# states_loaded = load_json(\"doc.json\")\n", "\n", @@ -463,7 +462,6 @@ "metadata": {}, "outputs": [], "source": [ - "from adalflow.utils.file_io import save_json\n", "\n", "save_json(doc.to_dict(), \"doc.json\")" ] @@ -624,7 +622,7 @@ } ], "source": [ - "from adalflow.core.component import Sequential\n", + "from adalflow import Sequential\n", "\n", "@fun_to_component\n", "def enhance_query(query:str) -> str:\n", diff --git a/tutorials/dataclass.ipynb b/tutorials/dataclass.ipynb 
index 1b8cc519..e2631c2b 100644 --- a/tutorials/dataclass.ipynb +++ b/tutorials/dataclass.ipynb @@ -57,7 +57,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -77,18 +77,18 @@ } ], "source": [ - "# it does not allow required field after optional field\n", - "@dataclass\n", - "class TrecData2:\n", - " question: Question = field(\n", - " metadata={\"desc\": \"The question asked by the user\"}\n", - " ) # Required field, you have to provide the question field at the instantiation\n", - " label: int = field(\n", - " metadata={\"desc\": \"The label of the question\"}, default=0\n", - " ) # Optional field\n", - " metadata: dict = field(\n", - " metadata={\"desc\": \"The metadata of the question\"}\n", - " ) # required field" + "# # it does not allow required field after optional field\n", + "# @dataclass\n", + "# class TrecData2:\n", + "# question: Question = field(\n", + "# metadata={\"desc\": \"The question asked by the user\"}\n", + "# ) # Required field, you have to provide the question field at the instantiation\n", + "# label: int = field(\n", + "# metadata={\"desc\": \"The label of the question\"}, default=0\n", + "# ) # Optional field\n", + "# metadata: dict = field(\n", + "# metadata={\"desc\": \"The metadata of the question\"}\n", + "# ) # required field" ] }, { diff --git a/tutorials/embedder.ipynb b/tutorials/embedder.ipynb index b7a5c714..29625454 100644 --- a/tutorials/embedder.ipynb +++ b/tutorials/embedder.ipynb @@ -15,7 +15,6 @@ "source": [ "from adalflow.core.embedder import Embedder\n", "from adalflow.components.model_client import OpenAIClient\n", - "from adalflow.utils import setup_env # ensure you setup OPENAI_API_KEY in your project .env file\n", "\n", "model_kwargs = {\n", " \"model\": \"text-embedding-3-small\",\n", diff --git a/tutorials/generator.ipynb b/tutorials/generator.ipynb index 1bf47865..e8a3fac2 100644 --- a/tutorials/generator.ipynb +++ b/tutorials/generator.ipynb @@ -44,7 +44,7 @@ ], "source": [ "from adalflow.core import Generator\n", - "from adalflow.components.model_client import OpenAIClient, get_all_messages_content, get_probabilities\n", + "from adalflow.components.model_client import OpenAIClient, get_probabilities\n", "from adalflow.utils import enable_library_logging\n", "\n", "enable_library_logging(level=\"DEBUG\")\n", @@ -78,7 +78,7 @@ "metadata": {}, "outputs": [], "source": [ - "from adalflow.core import Component, Generator, Prompt\n", + "from adalflow.core import Component, Generator\n", "from adalflow.components.model_client import GroqAPIClient\n", "from adalflow.utils import setup_env # noqa\n", "\n", diff --git a/tutorials/model_client.ipynb b/tutorials/model_client.ipynb index 3228d8c1..3e5b7b06 100644 --- a/tutorials/model_client.ipynb +++ b/tutorials/model_client.ipynb @@ -26,7 +26,6 @@ "source": [ "from adalflow.components.model_client import OpenAIClient\n", "from adalflow.core.types import ModelType\n", - "from adalflow.utils import setup_env\n", "\n", "openai_client = OpenAIClient()\n", "\n", diff --git a/tutorials/react_note.ipynb b/tutorials/react_note.ipynb index 2d0f2be0..0b647a4b 100644 --- a/tutorials/react_note.ipynb +++ b/tutorials/react_note.ipynb @@ -8,7 +8,7 @@ "source": [ "from adalflow.components.agent import ReActAgent\n", "from adalflow.core import Generator, ModelClientType, ModelClient\n", - "from adalflow.utils import setup_env, get_logger\n", + "from adalflow.utils import setup_env\n", "\n", "# get_logger(level=\"DEBUG\")\n", "\n", diff --git 
a/tutorials/retriever.ipynb b/tutorials/retriever.ipynb index 413dc465..c464f46b 100644 --- a/tutorials/retriever.ipynb +++ b/tutorials/retriever.ipynb @@ -536,7 +536,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -551,7 +551,7 @@ ], "source": [ "# try to use title this time\n", - "document_map_func = lambda x: x[\"title\"] + \" \" + x[\"content\"]\n", + "document_map_func = lambda x: x[\"title\"] + \" \" + x[\"content\"] # no \n", "\n", "reranker.build_index_from_documents(documents=documents, document_map_func=document_map_func)\n", "\n", @@ -1300,7 +1300,6 @@ "\n", "from adalflow.tracing import trace_generator_call\n", "\n", - "from adalflow.utils import setup_env\n", "\n", "# 1. set up the tracing for failed call as the retriever has generator attribute\n", "\n", diff --git a/tutorials/tools.ipynb b/tutorials/tools.ipynb index 3490fe7d..c32b9420 100644 --- a/tutorials/tools.ipynb +++ b/tutorials/tools.ipynb @@ -17,7 +17,6 @@ "source": [ "from openai import OpenAI\n", "import json\n", - "from adalflow.utils import setup_env\n", "\n", "client = OpenAI()\n", "\n", @@ -106,7 +105,7 @@ "outputs": [], "source": [ "from dataclasses import dataclass\n", - "from typing import Any, Dict, List, Tuple\n", + "from typing import Any, Dict, List\n", "import numpy as np\n", "import time\n", "import asyncio\n", @@ -445,12 +444,10 @@ "source": [ "# call all the above functions \n", "import nest_asyncio\n", - "import asyncio\n", "\n", "nest_asyncio.apply()\n", "\n", "\n", - "import time\n", "\n", "async def async_function_1():\n", " await asyncio.sleep(1)\n", @@ -1290,13 +1287,7 @@ } ], "source": [ - "import ast\n", - "import builtins\n", - "import contextlib\n", - "import ctypes\n", - "import sys\n", "import threading\n", - "import time\n", "\n", "# Define a list of safe built-ins\n", "SAFE_BUILTINS = {\n", @@ -1787,7 +1778,6 @@ "source": [ "queries = [\"add 2 and 3\", \"search for something\", \"add points (1, 2) and (3, 4)\", \"sum numpy array with arr = np.array([[1, 2], [3, 4]])\", \"multiply 2 with local variable x\", \"divide 2 by 3\"]\n", "\n", - "from adalflow.components.output_parsers import ListOutputParser\n", "from adalflow.core.string_parser import JsonParser # improve a list of json\n", "\n", "preset_prompt_kwargs = {\n", @@ -1982,9 +1972,6 @@ "# first check the openai's function call apis\n", "\n", "from openai import OpenAI\n", - "from openai.types import FunctionDefinition\n", - "from adalflow.utils import setup_env\n", - "import json\n", "\n", "client = OpenAI()\n", "\n", @@ -2242,8 +2229,7 @@ "metadata": {}, "outputs": [], "source": [ - "adalflow_fn_schema =\n", - "{\n", + "adalflow_fn_schema ={\n", " \"type\": \"object\",\n", " \"properties\": {\n", " \"weather\": {\n", @@ -2284,31 +2270,31 @@ "metadata": {}, "outputs": [], "source": [ - " llama_fn_schema = {\n", - " \"type\": \"object\",\n", - " \"properties\": {\"weather\": {\"$ref\": \"#/definitions/Weather\"}},\n", - " \"required\": [\"weather\"],\n", - " \"definitions\": {\n", - " \"Weather\": {\n", - " \"title\": \"Weather\",\n", - " \"type\": \"object\",\n", - " \"properties\": {\n", - " \"location\": {\n", - " \"title\": \"Location\",\n", - " \"desc\": \"The city and state, e.g. 
San Francisco, CA\",\n", - " \"type\": \"string\",\n", - " },\n", - " \"unit\": {\n", - " \"title\": \"Unit\",\n", - " \"enum\": [\"celsius\", \"fahrenheit\"],\n", - " \"type\": \"string\",\n", - " },\n", + "llama_fn_schema = {\n", + " \"type\": \"object\",\n", + " \"properties\": {\"weather\": {\"$ref\": \"#/definitions/Weather\"}},\n", + " \"required\": [\"weather\"],\n", + " \"definitions\": {\n", + " \"Weather\": {\n", + " \"title\": \"Weather\",\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"location\": {\n", + " \"title\": \"Location\",\n", + " \"desc\": \"The city and state, e.g. San Francisco, CA\",\n", + " \"type\": \"string\",\n", " },\n", - " \"required\": [\"location\", \"unit\"],\n", - " \"additionalProperties\": false,\n", - " }\n", - " },\n", - " }" + " \"unit\": {\n", + " \"title\": \"Unit\",\n", + " \"enum\": [\"celsius\", \"fahrenheit\"],\n", + " \"type\": \"string\",\n", + " },\n", + " },\n", + " \"required\": [\"location\", \"unit\"],\n", + " \"additionalProperties\": False,\n", + " }\n", + " },\n", + "}" ] }, { @@ -2319,7 +2305,6 @@ "source": [ "# level 1, call function with default python data types\n", "# such as str, int, float, list, dict, etc.\n", - "\n", "def _get_current_weather(location: str, unit: str = \"fahrenheit\"):\n", " \"\"\"Get the current weather in a given location\"\"\"\n", " if \"tokyo\" in location.lower():\n", From d7ee05dfabda35af9cd1de595dc764dfeaf09fa4 Mon Sep 17 00:00:00 2001 From: Li Yin Date: Fri, 22 Nov 2024 08:28:19 -0800 Subject: [PATCH 09/10] use the ignore setting in the project toml instead of the precommit config directly, combine with makefile --- .pre-commit-config.yaml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 50db8611..62b0b2ae 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,14 +16,6 @@ repos: args: ['--line-length=88'] exclude: ^docs/|.*\.(json|yaml|md|txt)$ - # - repo: https://github.com/astral-sh/ruff-pre-commit - # rev: v0.4.2 - # hooks: - # # Run the linter. - # - id: ruff - # args: ['--fix'] - # exclude: ^docs/|.*\.(json|yaml|md|txt)$ - # Add local hooks to run custom commands - repo: local hooks: From ece22e423a889d74bbe5101599432440997fb661 Mon Sep 17 00:00:00 2001 From: Li Yin Date: Fri, 22 Nov 2024 08:33:03 -0800 Subject: [PATCH 10/10] use the ignore setting in the project toml instead of the precommit config directly, combine with makefile --- .pre-commit-config.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 62b0b2ae..6f85a6c8 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,6 +16,14 @@ repos: args: ['--line-length=88'] exclude: ^docs/|.*\.(json|yaml|md|txt)$ + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.4.2 + hooks: + # Run the linter. + - id: ruff + args: ['--fix'] + exclude: ^docs/|.*\.(json|yaml|md|txt)$ + # Add local hooks to run custom commands - repo: local hooks: