From 96861fe4ac6fd173eb220c74338af0a113372656 Mon Sep 17 00:00:00 2001 From: Hit <100313234+H1Ts@users.noreply.github.com> Date: Sat, 3 Jun 2023 07:30:04 -0400 Subject: [PATCH] Add files via upload --- starcoder-main/LICENSE | 201 ++++++++++ starcoder-main/README.md | 212 +++++++++++ starcoder-main/chat/README.md | 111 ++++++ starcoder-main/chat/config.py | 103 ++++++ starcoder-main/chat/config.yaml | 42 +++ .../chat/deepspeed_z3_config_bf16.json | 40 ++ starcoder-main/chat/dialogues.py | 241 ++++++++++++ starcoder-main/chat/generate.py | 139 +++++++ starcoder-main/chat/npm.py | 4 + starcoder-main/chat/requirements.txt | 6 + starcoder-main/chat/train.py | 345 ++++++++++++++++++ starcoder-main/chat/utils.py | 65 ++++ starcoder-main/finetune/finetune.py | 312 ++++++++++++++++ .../finetune/merge_peft_adapters.py | 40 ++ starcoder-main/requirements.txt | 5 + 15 files changed, 1866 insertions(+) create mode 100644 starcoder-main/LICENSE create mode 100644 starcoder-main/README.md create mode 100644 starcoder-main/chat/README.md create mode 100644 starcoder-main/chat/config.py create mode 100644 starcoder-main/chat/config.yaml create mode 100644 starcoder-main/chat/deepspeed_z3_config_bf16.json create mode 100644 starcoder-main/chat/dialogues.py create mode 100644 starcoder-main/chat/generate.py create mode 100644 starcoder-main/chat/npm.py create mode 100644 starcoder-main/chat/requirements.txt create mode 100644 starcoder-main/chat/train.py create mode 100644 starcoder-main/chat/utils.py create mode 100644 starcoder-main/finetune/finetune.py create mode 100644 starcoder-main/finetune/merge_peft_adapters.py create mode 100644 starcoder-main/requirements.txt diff --git a/starcoder-main/LICENSE b/starcoder-main/LICENSE new file mode 100644 index 0000000..8dada3e --- /dev/null +++ b/starcoder-main/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/starcoder-main/README.md b/starcoder-main/README.md new file mode 100644 index 0000000..5e53236 --- /dev/null +++ b/starcoder-main/README.md @@ -0,0 +1,212 @@ +# 💫 StarCoder + +[Paper](https://drive.google.com/file/d/1cN-b9GnWtHzQRoE7M7gAEyivY0kl4BYs/view) | [Model](https://huggingface.co/bigcode/starcoder) | [Playground](https://huggingface.co/spaces/bigcode/bigcode-playground) | [VSCode](https://marketplace.visualstudio.com/items?itemName=HuggingFace.huggingface-vscode) | [Chat](https://huggingface.co/spaces/HuggingFaceH4/starchat-playground) + +# What is this about? +💫 StarCoder is a language model (LM) trained on source code and natural language text. Its training data incorporates more that 80 different programming languages as well as text extracted from GitHub issues and commits and from notebooks. This repository showcases how we get an overview of this LM's capabilities. + +# News + +* **May 9, 2023:** We've fine-tuned StarCoder to act as a helpful coding assistant 💬! 
Check out the `chat/` directory for the training code and play with the model [here](https://huggingface.co/spaces/HuggingFaceH4/starchat-playground). + +# Disclaimer + +Before you can use the model go to `hf.co/bigcode/starcoder` and accept the agreement. And make sure you are logged into the Hugging Face hub with: +```bash +huggingface-cli login +``` + +# Table of Contents +1. [Quickstart](#quickstart) + - [Installation](#installation) + - [Code generation with StarCoder](#code-generation) + - [Text-generation-inference code](#text-generation-inference) +2. [Fine-tuning](#fine-tuning) + - [Step by step installation with conda](#step-by-step-installation-with-conda) + - [Datasets](#datasets) + - [Stack Exchange](#stack-exchange-se) + - [Merging PEFT adapter layers](#merging-peft-adapter-layers) +3. [Evaluation](#evaluation) +4. [Inference hardware requirements](#inference-hardware-requirements) + +# Quickstart +StarCoder was trained on GitHub code, thus it can be used to perform code generation. More precisely, the model can complete the implementation of a function or infer the following characters in a line of code. This can be done with the help of the 🤗's [transformers](https://github.com/huggingface/transformers) library. + +## Installation +First, we have to install all the libraries listed in `requirements.txt` +```bash +pip install -r requirements.txt +``` +## Code generation +The code generation pipeline is as follows + +```python +from transformers import AutoModelForCausalLM, AutoTokenizer + +checkpoint = "bigcode/starcoder" +device = "cuda" # for GPU usage or "cpu" for CPU usage + +tokenizer = AutoTokenizer.from_pretrained(checkpoint) +# to save memory consider using fp16 or bf16 by specifying torch_dtype=torch.float16 for example +model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device) + +inputs = tokenizer.encode("def print_hello_world():", return_tensors="pt").to(device) +outputs = model.generate(inputs) +print(tokenizer.decode(outputs[0])) +``` +or +```python +from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline +checkpoint = "bigcode/starcoder" + +model = AutoModelForCausalLM.from_pretrained(checkpoint) +tokenizer = AutoTokenizer.from_pretrained(checkpoint) + +pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0) +print( pipe("def hello():") ) +``` +For hardware requirements, check the section [Inference hardware requirements](#inference-hardware-requirements). + +## Text-generation-inference + +```bash +docker run -p 8080:80 -v $PWD/data:/data -e HUGGING_FACE_HUB_TOKEN= -d ghcr.io/huggingface/text-generation-inference:latest --model-id bigcode/starcoder --max-total-tokens 8192 +``` +For more details, see [here](https://github.com/huggingface/text-generation-inference). + +# Fine-tuning + +Here, we showcase how we can fine-tune this LM on a specific downstream task. 
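+Under the hood, `finetune/finetune.py` relies on 🤗 [PEFT](https://github.com/huggingface/peft) to attach LoRA adapters to a base model loaded in 8-bit with `bitsandbytes`, so only a small number of adapter weights are trained. The sketch below illustrates that pattern only; the rank, alpha, dropout and `target_modules` values are illustrative placeholders rather than the script's exact defaults, so check `finetune/finetune.py` for the configuration actually used.
+
+```python
+from peft import LoraConfig, get_peft_model, prepare_model_for_int8_training
+from transformers import AutoModelForCausalLM
+
+# Load the base model in 8-bit and prepare it for parameter-efficient fine-tuning
+model = AutoModelForCausalLM.from_pretrained(
+    "bigcode/starcoder", load_in_8bit=True, device_map="auto"
+)
+model = prepare_model_for_int8_training(model)
+
+# Illustrative LoRA settings -- see finetune/finetune.py for the values actually used
+lora_config = LoraConfig(
+    r=16,
+    lora_alpha=32,
+    lora_dropout=0.05,
+    bias="none",
+    task_type="CAUSAL_LM",
+    target_modules=["c_proj", "c_attn", "q_attn"],
+)
+model = get_peft_model(model, lora_config)
+model.print_trainable_parameters()  # only the small adapter matrices are trainable
+```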
+ +## Step by step installation with conda + +Create a new conda environment and activate it +```bash +conda create -n env +conda activate env +``` +Install the `pytorch` version compatible with your version of cuda [here](https://pytorch.org/get-started/previous-versions/), for example the following command works with cuda 11.6 +```bash +conda install pytorch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 pytorch-cuda=11.6 -c pytorch -c nvidia +``` +Install `transformers` and `peft` +```bash +conda install -c huggingface transformers +pip install git+https://github.com/huggingface/peft.git +``` +Note that you can install the latest stable version of transformers by using + +```bash +pip install git+https://github.com/huggingface/transformers +``` + +Install `datasets`, `accelerate` and `huggingface_hub` + +```bash +conda install -c huggingface -c conda-forge datasets +conda install -c conda-forge accelerate +conda install -c conda-forge huggingface_hub +``` + +Finally, install `bitsandbytes` and `wandb` +```bash +pip install bitsandbytes +pip install wandb +``` +To get the full list of arguments with descriptions you can run the following command on any script: +``` +python scripts/some_script.py --help +``` +Before you run any of the scripts make sure you are logged in and can push to the hub: +```bash +huggingface-cli login +``` +Make sure you are logged in `wandb`: +```bash +wandb login +``` +Now that everything is done, you can clone the repository and get into the corresponding directory. + +## Datasets +💫 StarCoder can be fine-tuned to achieve multiple downstream tasks. Our interest here is to fine-tune StarCoder in order to make it follow instructions. [Instruction fine-tuning](https://arxiv.org/pdf/2109.01652.pdf) has gained a lot of attention recently as it proposes a simple framework that teaches language models to align their outputs with human needs. That procedure requires the availability of quality instruction datasets, which contain multiple `instruction - answer` pairs. Unfortunately such datasets are not ubiquitous but thanks to Hugging Face 🤗's [datasets](https://github.com/huggingface/datasets) library we can have access to some good proxies. To fine-tune cheaply and efficiently, we use Hugging Face 🤗's [PEFT](https://github.com/huggingface/peft) as well as Tim Dettmers' [bitsandbytes](https://github.com/TimDettmers/bitsandbytes). + + +### Stack Exchange SE +[Stack Exchange](https://en.wikipedia.org/wiki/Stack_Exchange) is a well-known network of Q&A websites on topics in diverse fields. It is a place where a user can ask a question and obtain answers from other users. Those answers are scored and ranked based on their quality. [Stack exchange instruction](https://huggingface.co/datasets/ArmelR/stack-exchange-instruction) is a dataset that was obtained by scrapping the site in order to build a collection of Q&A pairs. A language model can then be fine-tuned on that dataset to make it elicit strong and diverse question-answering skills. 
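+Before launching a full run, it can help to inspect a couple of records. The snippet below is a small sketch (not part of the repository's scripts) that streams the dataset and prints one Q&A pair; the `data_dir` value and the `question`/`response` column names are assumed to correspond to the `--subset`, `--input_column_name` and `--output_column_name` flags used in the commands below.
+
+```python
+from datasets import load_dataset
+
+# Stream the dataset so the full corpus never has to be downloaded locally
+dataset = load_dataset(
+    "ArmelR/stack-exchange-instruction",
+    data_dir="data/finetune",
+    split="train",
+    streaming=True,
+)
+
+# Print a truncated view of the first question/response pair
+sample = next(iter(dataset))
+print(sample["question"][:300])
+print(sample["response"][:300])
+```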
+
+To execute the fine-tuning script, run the following command:
+```bash
+python finetune/finetune.py \
+  --model_path="bigcode/starcoder"\
+  --dataset_name="ArmelR/stack-exchange-instruction"\
+  --subset="data/finetune"\
+  --split="train"\
+  --size_valid_set 10000\
+  --streaming\
+  --seq_length 2048\
+  --max_steps 1000\
+  --batch_size 1\
+  --input_column_name="question"\
+  --output_column_name="response"\
+  --gradient_accumulation_steps 16\
+  --learning_rate 1e-4\
+  --lr_scheduler_type="cosine"\
+  --num_warmup_steps 100\
+  --weight_decay 0.05\
+  --output_dir="./checkpoints"
+```
+The SE dataset is easier to manage when streaming is enabled. We also have to specify which split of the dataset to use. For more details, check the [dataset's page](https://huggingface.co/datasets/ArmelR/stack-exchange-instruction) on 🤗. Similarly, we can modify the command to account for the number of available GPUs:
+
+```bash
+python -m torch.distributed.launch \
+  --nproc_per_node number_of_gpus finetune/finetune.py \
+  --model_path="bigcode/starcoder"\
+  --dataset_name="ArmelR/stack-exchange-instruction"\
+  --subset="data/finetune"\
+  --split="train"\
+  --size_valid_set 10000\
+  --streaming \
+  --seq_length 2048\
+  --max_steps 1000\
+  --batch_size 1\
+  --input_column_name="question"\
+  --output_column_name="response"\
+  --gradient_accumulation_steps 16\
+  --learning_rate 1e-4\
+  --lr_scheduler_type="cosine"\
+  --num_warmup_steps 100\
+  --weight_decay 0.05\
+  --output_dir="./checkpoints"
+```
+## Merging PEFT adapter layers
+If you train a model with PEFT, you'll need to merge the adapter layers with the base model to run inference or evaluation. To do so, run:
+```bash
+python finetune/merge_peft_adapters.py --base_model_name_or_path model_to_merge --peft_model_path model_checkpoint
+
+# Push merged model to the Hub
+python finetune/merge_peft_adapters.py --base_model_name_or_path model_to_merge --peft_model_path model_checkpoint --push_to_hub
+```
+For example:
+
+```bash
+python finetune/merge_peft_adapters.py --base_model_name_or_path bigcode/starcoder --peft_model_path checkpoints/checkpoint-1000 --push_to_hub
+```
+
+# Evaluation
+To evaluate StarCoder and its derivatives, you can use the [BigCode-Evaluation-Harness](https://github.com/bigcode-project/bigcode-evaluation-harness), an evaluation framework for Code LLMs.
+
+# Inference hardware requirements
+In FP32 the model requires more than 60GB of RAM; you can load it in FP16 or BF16 with ~30GB, or in 8-bit with under 20GB of RAM:
+```python
+# make sure you have accelerate and bitsandbytes installed
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+tokenizer = AutoTokenizer.from_pretrained("bigcode/starcoder")
+# for fp16, replace `load_in_8bit=True` with `torch_dtype=torch.float16`
+model = AutoModelForCausalLM.from_pretrained("bigcode/starcoder", device_map="auto", load_in_8bit=True)
+print(f"Memory footprint: {model.get_memory_footprint() / 1e6:.2f} MB")
+```
+```
+Memory footprint: 15939.61 MB
+```
+You can also try [starcoder.cpp](https://github.com/bigcode-project/starcoder.cpp), a C++ implementation built on the [ggml](https://github.com/ggerganov/ggml) library.
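+If you prefer half precision over 8-bit quantization, the variant below sketches the FP16 path mentioned in the comment of the snippet above (use `torch.bfloat16` instead on hardware that supports it); it is an illustration rather than an official script.
+
+```python
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+tokenizer = AutoTokenizer.from_pretrained("bigcode/starcoder")
+# Half precision roughly halves the FP32 footprint, to ~30GB
+model = AutoModelForCausalLM.from_pretrained(
+    "bigcode/starcoder",
+    device_map="auto",
+    torch_dtype=torch.float16,
+)
+print(f"Memory footprint: {model.get_memory_footprint() / 1e6:.2f} MB")
+```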
diff --git a/starcoder-main/chat/README.md b/starcoder-main/chat/README.md new file mode 100644 index 0000000..8a0aedb --- /dev/null +++ b/starcoder-main/chat/README.md @@ -0,0 +1,111 @@ +# Fine-tuning StarCoder for chat-based applications + +This is a fully-working example to fine-tune `StarCoder` on a corpus of multi-turn dialogues and thus create a coding assistant that is chatty and helpful. The resulting model is quite good at generating code for plots and other programming tasks. For example, given the prompt: + +``` +Draw me a map of the world using geopandas. Make it so that only Germany and Spain are colored red. +``` + +the model will produce: + +```python +import pandas as pd +from matplotlib import pyplot as plt +import geopandas as gpd +from shapely.geometry import Point, Polygon +%matplotlib inline +# Load data from URL or local file +url = 'https://raw.githubusercontent.com/datasets/geo-countries/master/data/countries.geojson' +df = gpd.read_file(url) + +# Create plot +fig, ax = plt.subplots() +ax.set_aspect('equal') +world = gpd.GeoDataFrame({'name': ['World'], + 'geometry': [Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])]}) +germany = df[df['ADMIN'] == "Germany"] +spain = df[df['ADMIN'] == "Spain"] +other = df[(df['ADMIN']!= "Germany") & (df['ADMIN']!= "Spain")] +world.plot(color='lightgrey', edgecolor='white', ax=ax) +germany.plot(color="red", ax=ax) +spain.plot(color="red", ax=ax) +other.plot(color="skyblue", ax=ax) +plt.title("European Countries") +plt.show() +``` + +Check out our [blog post](https://huggingface.co/blog/starchat-alpha) for more details. + +## Getting started + +To run the `train.py` script, first create a Python virtual environment using e.g. Conda: + +```shell +conda create -n chat python=3.10 && conda activate chat +``` + +Next, install PyTorch v1.13.1. Since this is hardware-dependent, we direct you to the [PyTorch Installation Page](https://pytorch.org/get-started/previous-versions/#v1131) for this step. Next, install the rest of the project dependencies: + +```shell +pip install -r requirements.txt +``` + +You'll also need to be logged into both your Hugging Face account. To do so, run: + +```shell +huggingface-cli login +``` + +Finally, install Git LFS with: + +```shell +sudo apt-get install git-lfs +``` + +## Prepare your dataset + +For training and inference, we use _dialogue templates_ to format each message in a conversation. For example, a typical dialogue between a human user and AI assistant takes the form: + +```json +{ + "messages": [ + { + "content": "Is it possible to imagine a society without law?", + "role": "user"}, + { + "content": "It is difficult to imagine a society that is able to be maintained without any semblance of Law.", + "role": "assistant", + }, + { + "content": "It seems like you consider the absence of law equal to the absence of anything that could guide the behaviour of the individual.", + "role": "user", + }, + { + "content": "You are correct that there are other factors that can guide behavior in a society and play a role in shaping individuals' behavior and interactions with each other. However, even in societies where these factors are present, laws still serve an important role in maintaining social order and resolving conflicts.", + "role": "assistant", + } + ] +} +``` + +Make sure you convert your dataset according to this schema, in particular you need to include a `messages` column like the above. You can adjust the model, dataset, and hyperparamters in the `config.yaml` file. 
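+If your data currently lives as plain `prompt`/`completion` pairs, a transformation like the sketch below can reshape it into the `messages` schema; the dataset id and column names here are placeholders, so adapt them to your own data.
+
+```python
+from datasets import load_dataset
+
+def to_messages(example):
+    # Turn one prompt/completion pair into a two-turn dialogue
+    example["messages"] = [
+        {"role": "user", "content": example["prompt"]},
+        {"role": "assistant", "content": example["completion"]},
+    ]
+    return example
+
+# Placeholder dataset id -- replace with your own
+raw = load_dataset("your-org/your-dataset", split="train")
+formatted = raw.map(to_messages, remove_columns=["prompt", "completion"])
+formatted.push_to_hub("your-org/your-dataset-chat-format")  # optional
+```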
+ +## Launch training + +We use DeepSpeed ZeRO-3 to shard the model and optimizer across 8 x A100 (80GB) GPUs. To fine-tune run: + +``` +TRANSFORMERS_VERBOSITY=info torchrun --nproc_per_node=8 train.py config.yaml --deepspeed=deepspeed_z3_config_bf16.json +``` + +By default, this will save the model checkpoint in the `data/` directory and also push it to the Hugging Face Hub. + + +## Generate samples + +To generate a few coding examples from your model, run: + +```shell +python generate.py --model_id path/to/your/model +``` + diff --git a/starcoder-main/chat/config.py b/starcoder-main/chat/config.py new file mode 100644 index 0000000..a5508a1 --- /dev/null +++ b/starcoder-main/chat/config.py @@ -0,0 +1,103 @@ +from dataclasses import dataclass, field +from typing import Optional + +import transformers +from transformers import MODEL_FOR_CAUSAL_LM_MAPPING + +MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys()) +MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) + + +@dataclass +class ModelArguments: + """ + Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. + """ + + model_name_or_path: Optional[str] = field( + default=None, + metadata={ + "help": ( + "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch." + ) + }, + ) + model_revision: str = field( + default="main", + metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, + ) + torch_dtype: Optional[str] = field( + default=None, + metadata={ + "help": ( + "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the " + "dtype will be automatically derived from the model's weights." + ), + "choices": ["auto", "bfloat16", "float16", "float32"], + }, + ) + use_fast_tokenizer: bool = field( + default=True, + metadata={"help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."}, + ) + + +@dataclass +class DataArguments: + """ + Arguments pertaining to what data we are going to input our model for training and eval. + """ + + dataset_name: Optional[str] = field( + default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} + ) + max_train_samples: Optional[int] = field( + default=None, + metadata={ + "help": ( + "For debugging purposes or quicker training, truncate the number of training examples to this " + "value if set." + ) + }, + ) + max_eval_samples: Optional[int] = field( + default=None, + metadata={ + "help": ( + "For debugging purposes or quicker training, truncate the number of evaluation examples to this " + "value if set." + ) + }, + ) + block_size: Optional[int] = field( + default=None, + metadata={ + "help": ( + "Optional input sequence length after tokenization. " + "The training dataset will be truncated in blocks of this size for training. " + "Defaults to the model's max input length for single sentence inputs (taking into account special tokens)." + ) + }, + ) + overwrite_cache: bool = field( + default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} + ) + preprocessing_num_workers: Optional[int] = field( + default=None, + metadata={"help": "The number of processes to use for preprocessing."}, + ) + dialogue_template: Optional[str] = field( + default="no_system", + metadata={ + "help": "The name of the dialogue template to use for conditioning the model. See h4.training.dialogues for choices." 
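+            # NOTE: the supported choices ("default", "no_system", "alpaca") are defined in
+            # chat/dialogues.py (SUPPORTED_DIALOGUE_TEMPLATES), not in an h4.training module.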
+ }, + ) + + +@dataclass +class TrainingArguments(transformers.TrainingArguments): + """ + Arguments related to the training process itself. For all parameters, see: https://huggingface.co/docs/transformers/v4.26.1/en/main_classes/trainer#transformers.TrainingArguments + """ + + diff --git a/starcoder-main/chat/config.yaml b/starcoder-main/chat/config.yaml new file mode 100644 index 0000000..141565c --- /dev/null +++ b/starcoder-main/chat/config.yaml @@ -0,0 +1,42 @@ +# Model arguments +model_name_or_path: bigcode/starcoderbase + +# Data training arguments +block_size: 1024 +dataset_name: HuggingFaceH4/oasst1_en +dialogue_template: no_system +preprocessing_num_workers: 12 + +# Training arguments with sensible defaults +# Add other options from here: https://huggingface.co/docs/transformers/v4.26.1/en/main_classes/trainer#transformers.TrainingArguments +bf16: true # Gives ~2x speed up in training time, but disable if you start seeing NaNs +do_eval: true +do_train: true +evaluation_strategy: epoch # One of ["no", "steps", "epoch"] +gradient_accumulation_steps: 8 +gradient_checkpointing: true +hub_model_id: lewtun/starchat-alpha +hub_private_repo: true +hub_strategy: every_save +learning_rate: 2.0e-05 +log_level: passive +logging_steps: 8 +logging_strategy: steps +lr_scheduler_type: cosine +max_steps: -1 +num_train_epochs: 3 +output_dir: data/starchat-alpha +overwrite_output_dir: true +per_device_eval_batch_size: 4 +per_device_train_batch_size: 4 +push_to_hub: true +remove_unused_columns: true +report_to: +- tensorboard +save_steps: 500 +save_strategy: steps +save_total_limit: null +seed: 42 +tf32: true +warmup_ratio: 0.03 +weight_decay: 0. \ No newline at end of file diff --git a/starcoder-main/chat/deepspeed_z3_config_bf16.json b/starcoder-main/chat/deepspeed_z3_config_bf16.json new file mode 100644 index 0000000..d067c66 --- /dev/null +++ b/starcoder-main/chat/deepspeed_z3_config_bf16.json @@ -0,0 +1,40 @@ +{ + "bf16": { + "enabled": "auto" + }, + "optimizer": { + "type": "AdamW", + "params": { + "lr": "auto", + "betas": "auto", + "eps": "auto", + "weight_decay": "auto" + } + }, + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": "auto", + "warmup_max_lr": "auto", + "warmup_num_steps": "auto" + } + }, + "zero_optimization": { + "stage": 3, + "overlap_comm": true, + "contiguous_gradients": true, + "sub_group_size": 1e9, + "reduce_bucket_size": "auto", + "stage3_prefetch_bucket_size": "auto", + "stage3_param_persistence_threshold": "auto", + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_gather_16bit_weights_on_model_save": true + }, + "gradient_accumulation_steps": "auto", + "gradient_clipping": "auto", + "steps_per_print": 2000, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "wall_clock_breakdown": false +} \ No newline at end of file diff --git a/starcoder-main/chat/dialogues.py b/starcoder-main/chat/dialogues.py new file mode 100644 index 0000000..634c4a1 --- /dev/null +++ b/starcoder-main/chat/dialogues.py @@ -0,0 +1,241 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os +from dataclasses import asdict, dataclass +from pathlib import Path +from typing import Any, Dict, List, Optional, Type, TypeVar, Union + +from huggingface_hub import ModelHubMixin, hf_hub_download + +# Generic variable that is either ModelHubMixin or a subclass thereof +T = TypeVar("T", bound="ModelHubMixin") + +TEMPLATE_FILENAME = "dialogue_template.json" +IGNORE_INDEX = -100 + + +@dataclass +class DialogueTemplate(ModelHubMixin): + """Converts all turns of a dialogue between a user and assistant to a standardized format. + + Adapted from OpenAI's ChatML (https://github.com/openai/openai-python/blob/main/chatml.md) and Vicuna (https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py) + """ + + system: str + messages: List[Dict[str, str]] = None + system_token: str = "<|system|>" + user_token: str = "<|user|>" + assistant_token: str = "<|assistant|>" + end_token: str = "<|end|>" + + def get_training_prompt(self) -> str: + prompt = self.system_token + "\n" + self.system + self.end_token + "\n" + if self.messages is None: + raise ValueError("Dialogue template must have at least one message.") + for message in self.messages: + if message["role"] == "user": + prompt += self.user_token + "\n" + message["content"] + self.end_token + "\n" + else: + prompt += self.assistant_token + "\n" + message["content"] + self.end_token + "\n" + return prompt + + def get_inference_prompt(self) -> str: + prompt = self.system_token + "\n" + self.system + self.end_token + "\n" + if self.messages is None: + raise ValueError("Dialogue template must have at least one message.") + for message in self.messages: + if message["role"] == "user": + prompt += self.user_token + "\n" + message["content"] + self.end_token + "\n" + else: + prompt += self.assistant_token + "\n" + message["content"] + self.end_token + "\n" + prompt += self.assistant_token + return prompt + + def get_dialogue(self): + """Helper function to format the messages as an easy-to-read dialogue.""" + prompt = "" + if self.messages is None: + raise ValueError("Dialogue template must have at least one message.") + for message in self.messages: + if message["role"] == "user": + prompt += "\n\nHuman: " + message["content"] + else: + prompt += "\n\nAssistant: " + message["content"] + return prompt + + def get_special_tokens(self) -> List[str]: + return [self.system_token, self.user_token, self.assistant_token, self.end_token] + + def copy(self): + return DialogueTemplate( + system=self.system, + messages=self.messages, + system_token=self.system_token, + user_token=self.user_token, + assistant_token=self.assistant_token, + end_token=self.end_token, + ) + + def to_dict(self) -> Dict[str, Any]: + return {k: v for k, v in asdict(self).items()} + + @classmethod + def from_dict(cls, data): + return DialogueTemplate( + system=data["system"] if "system" in data else "", + messages=data["messages"] if "messages" in data else None, + system_token=data["system_token"] if "system_token" in data else "<|system|>", + user_token=data["user_token"] if "user_token" in data else "<|user|>", + 
assistant_token=data["assistant_token"] if "assistant_token" in data else "<|assistant|>", + end_token=data["end_token"] if "end_token" in data else "<|end|>", + ) + + def _save_pretrained(self, save_directory: Union[str, Path]) -> None: + save_directory = Path(save_directory) + save_directory.mkdir(exist_ok=True) + with open(save_directory / "dialogue_template.json", "w") as f: + json.dump(self.to_dict(), f, indent=2) + + @classmethod + def _from_pretrained( + cls: Type[T], + *, + model_id: str, + revision: Optional[str], + cache_dir: Optional[Union[str, Path]], + force_download: bool, + proxies: Optional[Dict], + resume_download: bool, + local_files_only: bool, + token: Optional[Union[str, bool]], + **model_kwargs, + ) -> T: + """Loads the dialogue template from a local directory or the Huggingface Hub. + + Args: + model_id (`str`): + ID of the model to load from the Huggingface Hub (e.g. `bigscience/bloom`). + revision (`str`, *optional*): + Revision of the model on the Hub. Can be a branch name, a git tag or any commit id. Defaults to the + latest commit on `main` branch. + force_download (`bool`, *optional*, defaults to `False`): + Whether to force (re-)downloading the model weights and configuration files from the Hub, overriding + the existing cache. + resume_download (`bool`, *optional*, defaults to `False`): + Whether to delete incompletely received files. Will attempt to resume the download if such a file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint (e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`). + token (`str` or `bool`, *optional*): + The token to use as HTTP bearer authorization for remote files. By default, it will use the token + cached when running `huggingface-cli login`. + cache_dir (`str`, `Path`, *optional*): + Path to the folder where cached files are stored. + local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, avoid downloading the file and return the path to the local cached file if it exists. + model_kwargs: + Additional keyword arguments passed along to the [`~ModelHubMixin._from_pretrained`] method. + """ + if os.path.isdir(model_id): # Can either be a local directory + print("Loading dialogue template from local directory") + template_file = os.path.join(model_id, TEMPLATE_FILENAME) + else: # Or a template on the Hub + template_file = hf_hub_download( # Download from the hub, passing same input args + repo_id=model_id, + filename=TEMPLATE_FILENAME, + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + token=token, + local_files_only=local_files_only, + ) + + # Load template + with open(template_file, "r") as f: + data = json.load(f) + return cls.from_dict(data=data) + + +# A shortened version of the system message in Anthropic's HHH prompt: https://gist.github.com/jareddk/2509330f8ef3d787fc5aaac67aab5f11#file-hhh_prompt-txt +default_template = DialogueTemplate( + system="Below is a dialogue between a human user and an AI assistant. The assistant is happy to help with almost anything, and will do its best to understand exactly what is needed.", +) + +# OpenAI and OpenAssistant train on few to no system messages. +# TODO: consider defining this as the `default` template +no_system_template = DialogueTemplate( + system="", +) + +alpaca_template = DialogueTemplate( + system="Below is an instruction that describes a task. 
Write a response that appropriately completes the request.", + user_token="### Instruction:", + assistant_token="### Response:", +) + +SUPPORTED_DIALOGUE_TEMPLATES = { + "default": default_template, + "no_system": no_system_template, + "alpaca": alpaca_template, +} + + +def get_dialogue_template(template: str) -> DialogueTemplate: + if template not in SUPPORTED_DIALOGUE_TEMPLATES.keys(): + raise ValueError(f"Template {template} is not supported!") + return SUPPORTED_DIALOGUE_TEMPLATES[template].copy() + + +def prepare_dialogue(example, dialogue_template, is_train=True): + """Format example to single- or multi-turn dialogue.""" + # TODO: make this simpler by just ensuring every dataset has a messages column + if "messages" in example.keys() and example["messages"] is not None: + dialogue_template.messages = example["messages"] + elif all(k in example.keys() for k in ("prompt", "completion")): + # Construct single-turn dialogue from prompt and completion + dialogue_template.messages = [ + {"role": "user", "content": example["prompt"]}, + {"role": "assistant", "content": example["completion"]}, + ] + elif "prompt" in example.keys(): + # Construct single-turn dialogue from prompt (inference only) + dialogue_template.messages = [ + {"role": "user", "content": example["prompt"]}, + ] + else: + raise ValueError( + f"Could not format example as dialogue! Require either `messages` or `[prompt, completion]` or `[prompt]` keys but found {list(example.keys())}" + ) + if is_train: + example["text"] = dialogue_template.get_training_prompt() + else: + example["text"] = dialogue_template.get_inference_prompt() + return example + + +def mask_user_labels(tokenizer, dialogue_template, labels): + """Masks the user turns of a dialogue from the loss""" + user_token_id = tokenizer.convert_tokens_to_ids(dialogue_template.user_token) + assistant_token_id = tokenizer.convert_tokens_to_ids(dialogue_template.assistant_token) + for idx, label_id in enumerate(labels): + if label_id == user_token_id: + current_idx = idx + while labels[current_idx] != assistant_token_id and current_idx < len(labels): + labels[current_idx] = IGNORE_INDEX + current_idx += 1 diff --git a/starcoder-main/chat/generate.py b/starcoder-main/chat/generate.py new file mode 100644 index 0000000..64a3905 --- /dev/null +++ b/starcoder-main/chat/generate.py @@ -0,0 +1,139 @@ +# coding=utf-8 +# Copyright 2023 The BigCode and HuggingFace teams. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +"""A simple script to quickly check the model outputs of a generative model""" +import argparse + +import torch +from dialogues import DialogueTemplate, get_dialogue_template +from transformers import (AutoModelForCausalLM, AutoTokenizer, + GenerationConfig, set_seed) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--model_id", + type=str, + help="Name of model to generate samples with", + ) + parser.add_argument( + "--revision", + type=str, + default=None, + help="The model repo's revision to use", + ) + parser.add_argument( + "--system_prompt", type=str, default=None, help="Overrides the dialogue template's system prompt" + ) + args = parser.parse_args() + + # Set seed for reproducibility + set_seed(42) + + prompts = [ + [ + { + "role": "user", + "content": "Develop a C++ program that reads a text file line by line and counts the number of occurrences of a specific word in the file.", + } + ], + [ + { + "role": "user", + "content": "Implement a Python function to find the longest common subsequence of two input strings using dynamic programming.", + } + ], + [{"role": "user", "content": "Implement a regular expression in Python to validate an email address."}], + [ + { + "role": "user", + "content": "Write a program to find the nth Fibonacci number using dynamic programming.", + } + ], + [ + { + "role": "user", + "content": "Implement a binary search algorithm to find a specific element in a sorted array.", + } + ], + [{"role": "user", "content": "Implement a queue data structure using two stacks in Python."}], + [ + { + "role": "user", + "content": "Implement a program to find the common elements in two arrays without using any extra data structures.", + } + ], + ] + + try: + dialogue_template = DialogueTemplate.from_pretrained(args.model_id, revision=args.revision) + except Exception: + print("No dialogue template found in model repo. 
Defaulting to the `no_system` template.") + dialogue_template = get_dialogue_template("no_system") + + if args.system_prompt is not None: + dialogue_template.system = args.system_prompt + formatted_prompts = [] + for prompt in prompts: + dialogue_template.messages = [prompt] if isinstance(prompt, dict) else prompt + formatted_prompts.append(dialogue_template.get_inference_prompt()) + + print("=== SAMPLE PROMPT ===") + print(formatted_prompts[0]) + print("=====================") + + device = "cuda" if torch.cuda.is_available() else "cpu" + tokenizer = AutoTokenizer.from_pretrained(args.model_id, revision=args.revision) + print(f"Special tokens: {tokenizer.special_tokens_map}") + print(f"EOS token ID for generation: {tokenizer.convert_tokens_to_ids(dialogue_template.end_token)}") + generation_config = GenerationConfig( + temperature=0.2, + top_k=50, + top_p=0.95, + repetition_penalty=1.2, + do_sample=True, + pad_token_id=tokenizer.eos_token_id, + eos_token_id=tokenizer.convert_tokens_to_ids(dialogue_template.end_token), + min_new_tokens=32, + max_new_tokens=256, + ) + model = AutoModelForCausalLM.from_pretrained( + args.model_id, revision=args.revision, load_in_8bit=True, device_map="auto", torch_dtype=torch.float16 + ) + outputs = "" + for idx, prompt in enumerate(formatted_prompts): + batch = tokenizer(prompt, return_tensors="pt", return_token_type_ids=False).to(device) + generated_ids = model.generate(**batch, generation_config=generation_config) + generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=False).lstrip() + outputs += generated_text + "\n\n" + print(f"=== EXAMPLE {idx} ===") + print() + print(generated_text) + print() + print("======================") + print() + + raw_model_name = args.model_id.split("/")[-1] + model_name = f"{raw_model_name}-{args.prompt_type}" + if args.revision is not None: + model_name += f"-{args.revision}" + + with open(f"data/samples-{model_name}.txt", "w", encoding="utf-8") as f: + f.write(outputs) + + +if __name__ == "__main__": + main() diff --git a/starcoder-main/chat/npm.py b/starcoder-main/chat/npm.py new file mode 100644 index 0000000..fbbc95c --- /dev/null +++ b/starcoder-main/chat/npm.py @@ -0,0 +1,4 @@ +from npm.bindings import npm_install +# Install npm to run the project +# Install the dependencies in a directory containing a package.json +npm_install('/path/to/some/directory/') \ No newline at end of file diff --git a/starcoder-main/chat/requirements.txt b/starcoder-main/chat/requirements.txt new file mode 100644 index 0000000..bebbe22 --- /dev/null +++ b/starcoder-main/chat/requirements.txt @@ -0,0 +1,6 @@ +transformers>=4.28.1 +tokenizers>=0.13.3 +deepspeed==0.9.1 +datasets>=2.12.0 +accelerate>=0.18.0 +tensorboard diff --git a/starcoder-main/chat/train.py b/starcoder-main/chat/train.py new file mode 100644 index 0000000..df383aa --- /dev/null +++ b/starcoder-main/chat/train.py @@ -0,0 +1,345 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright 2023 The BigCode & HuggingFace Inc. teams. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +""" +Script to instruction fine-tune causal language models on a Hub dataset + +Adapted from huggingface/transformers: https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_clm.py +""" + +import logging +import math +import os +import random +import sys +from itertools import chain + +import datasets +import torch +import transformers +from config import DataArguments, ModelArguments, TrainingArguments +from datasets import load_dataset +from dialogues import get_dialogue_template, mask_user_labels, prepare_dialogue +from transformers import (AutoModelForCausalLM, AutoTokenizer, Trainer, + default_data_collator, set_seed) +from transformers.testing_utils import CaptureLogger +from transformers.trainer_utils import get_last_checkpoint +from utils import StarChatArgumentParser, hf_login + +logger = logging.getLogger(__name__) + + +def main(): + parser = StarChatArgumentParser((ModelArguments, DataArguments, TrainingArguments)) + if len(sys.argv) == 2 and sys.argv[1].endswith(".yaml"): + # If we pass only one argument to the script and it's the path to a YAML file, + # let's parse it to get our arguments. + model_args, data_args, training_args = parser.parse_yaml_file(os.path.abspath(sys.argv[1])) + # parse command line args and yaml file + elif len(sys.argv) > 2 and sys.argv[1].endswith(".yaml"): + model_args, data_args, training_args = parser.parse_yaml_and_args(os.path.abspath(sys.argv[1]), sys.argv[2:]) + # parse command line args only + else: + model_args, data_args, training_args = parser.parse_args_into_dataclasses() + + # Set seed for reproducibility + set_seed(training_args.seed) + + ############### + # Setup logging + ############### + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + handlers=[logging.StreamHandler(sys.stdout)], + ) + log_level = training_args.get_process_log_level() + logger.setLevel(log_level) + datasets.utils.logging.set_verbosity(log_level) + transformers.utils.logging.set_verbosity(log_level) + transformers.utils.logging.enable_default_handler() + transformers.utils.logging.enable_explicit_format() + + # Log on each process a small summary + logger.warning( + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + + f" distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" + ) + logger.info(f"Model parameters {model_args}") + logger.info(f"Data parameters {data_args}") + logger.info(f"Training/evaluation parameters {training_args}") + + # Login to HuggingFace Hub if needed + hf_login() + + ########################### + # Detecting last checkpoint + ########################### + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. 
To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + + ############### + # Load datasets + ############### + raw_datasets = load_dataset(data_args.dataset_name) + logger.info( + f"Training on the following datasets and their proportions: {[split + ' : ' + str(dset.num_rows) for split, dset in raw_datasets.items()]}" + ) + with training_args.main_process_first(desc="Log a few random samples from the raw training set"): + for index in random.sample(range(len(raw_datasets["train"])), 3): + logger.info(f"Sample {index} of the raw training set:\n\n{raw_datasets['train'][index]['messages']}") + + ######################### + # Apply dialogue template + ######################### + dialogue_template = get_dialogue_template(data_args.dialogue_template) + logger.info(f"System prompt for dialogue template: {dialogue_template.system}") + raw_datasets = raw_datasets.map(prepare_dialogue, fn_kwargs={"dialogue_template": dialogue_template}) + + ##################################### + # Load tokenizer and process datasets + ##################################### + tokenizer = AutoTokenizer.from_pretrained( + model_args.model_name_or_path, + revision=model_args.model_revision, + ) + + # Note that we must call `add_tokens` before adding any special tokens + dialogue_tokens = dialogue_template.get_special_tokens() + num_added_tokens = tokenizer.add_special_tokens({"additional_special_tokens": dialogue_tokens}) + logger.info(f"Added {num_added_tokens} new tokens: {dialogue_tokens}") + + if training_args.do_train: + column_names = list(raw_datasets["train"].features) + else: + column_names = list(raw_datasets["test"].features) + text_column_name = "text" if "text" in column_names else column_names[0] + + with training_args.main_process_first(desc="Log a few random samples from the training set"): + for index in random.sample(range(len(raw_datasets["train"])), 3): + logger.info(f"Sample {index} of the raw training set:\n\n{raw_datasets['train'][index]['text']}") + + # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function + tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base") + + def tokenize_function(examples): + with CaptureLogger(tok_logger) as cl: + output = tokenizer(examples[text_column_name], return_token_type_ids=False) + # clm input could be much much longer than block_size + if "Token indices sequence length is longer than the" in cl.out: + tok_logger.warning( + "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits" + " before being passed to the model." + ) + return output + + with training_args.main_process_first(desc="dataset map tokenization"): + tokenized_datasets = raw_datasets.map( + tokenize_function, + batched=True, + num_proc=data_args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=not data_args.overwrite_cache, + desc="Running tokenizer on dataset", + ) + + ############################## + # Concatenate and chunk corpus + ############################## + if data_args.block_size is None: + block_size = tokenizer.model_max_length + if block_size > 1024: + logger.warning( + "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" + " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" + " override this default with `--block_size xxx`." 
+ ) + block_size = 1024 + else: + if data_args.block_size > tokenizer.model_max_length: + logger.warning( + f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model" + f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}." + ) + block_size = min(data_args.block_size, tokenizer.model_max_length) + + # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. + def group_texts(examples): + # Concatenate all texts. + concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} + total_length = len(concatenated_examples[list(examples.keys())[0]]) + # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can + # customize this part to your needs. + if total_length >= block_size: + total_length = (total_length // block_size) * block_size + # Split by chunks of max_len. + result = { + k: [t[i : i + block_size] for i in range(0, total_length, block_size)] + for k, t in concatenated_examples.items() + } + labels = result["input_ids"].copy() + mask_user_labels(tokenizer, dialogue_template, labels) + result["labels"] = labels + return result + + # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder + # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower + # to preprocess. + with training_args.main_process_first(desc="grouping texts together"): + lm_datasets = tokenized_datasets.map( + group_texts, + batched=True, + num_proc=data_args.preprocessing_num_workers, + load_from_cache_file=not data_args.overwrite_cache, + desc=f"Grouping texts in chunks of {block_size}", + ) + + if training_args.do_train: + if "train" not in tokenized_datasets: + raise ValueError("--do_train requires a train dataset") + train_dataset = lm_datasets["train"] + if data_args.max_train_samples is not None: + max_train_samples = min(len(train_dataset), data_args.max_train_samples) + train_dataset = train_dataset.select(range(max_train_samples)) + + if training_args.do_eval: + if "test" not in tokenized_datasets: + raise ValueError("--do_eval requires a validation dataset") + eval_dataset = lm_datasets["test"] + if data_args.max_eval_samples is not None: + max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) + eval_dataset = eval_dataset.select(range(max_eval_samples)) + + ####################### + # Load pretrained model + ####################### + logger.info("*** Load pretrained model ***") + torch_dtype = ( + model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype) + ) + model = AutoModelForCausalLM.from_pretrained( + model_args.model_name_or_path, + revision=model_args.model_revision, + torch_dtype=torch_dtype, + use_cache=False if training_args.gradient_checkpointing else True, + ) + model.resize_token_embeddings(len(tokenizer)) + + ######################## + # Initialize the Trainer + ######################## + trainer = Trainer( + model=model, + args=training_args, + train_dataset=train_dataset if training_args.do_train else None, + eval_dataset=eval_dataset if training_args.do_eval else None, + tokenizer=tokenizer, + # Data collator defaults to DataCollatorWithPadding, so we change it + # since we've already chunked our corpus + data_collator=default_data_collator, + ) + + ############### + # Training loop + ############### + if 
training_args.do_train: + logger.info("*** Train ***") + checkpoint = None + if training_args.resume_from_checkpoint is not None: + checkpoint = training_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint + train_result = trainer.train(resume_from_checkpoint=checkpoint) + + metrics = train_result.metrics + + max_train_samples = ( + data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) + ) + metrics["train_samples"] = min(max_train_samples, len(train_dataset)) + + trainer.log_metrics("train", metrics) + trainer.save_metrics("train", metrics) + trainer.save_state() + + ########## + # Evaluate + ########## + if training_args.do_eval: + logger.info("*** Evaluate ***") + + metrics = trainer.evaluate() + + max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) + metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) + try: + perplexity = math.exp(metrics["eval_loss"]) + except OverflowError: + perplexity = float("inf") + metrics["perplexity"] = perplexity + + trainer.log_metrics("eval", metrics) + trainer.save_metrics("eval", metrics) + + ################################# + # Create model card & push to Hub + ################################# + kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-generation"} + if data_args.dataset_name is not None: + kwargs["dataset_tags"] = data_args.dataset_name + if data_args.dataset_config_name is not None: + kwargs["dataset_args"] = data_args.dataset_config_name + kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" + else: + kwargs["dataset"] = data_args.dataset_name + kwargs["dataset_args"] = "default" + + # Store dialogue template so we can load it at deployment time + dialogue_template.save_pretrained(training_args.output_dir) + + if training_args.push_to_hub: + trainer.push_to_hub(**kwargs) + else: + trainer.save_model(training_args.output_dir) + trainer.create_model_card(**kwargs) + + with training_args.main_process_first(desc="Generate a sample from the model"): + inputs = tokenizer( + "<|system|>\n<|end|>\n<|user|>\nHow many helicopters can a human eat in one sitting?<|end|>\n<|assistant|>", + return_tensors="pt", + return_token_type_ids=False, + ).to(training_args.device) + outputs = model.generate( + **inputs, + max_new_tokens=256, + pad_token_id=tokenizer.eos_token_id, + eos_token_id=tokenizer.convert_tokens_to_ids(dialogue_template.end_token), + ) + logger.info(f"=== SAMPLE OUTPUT ==\n\n{tokenizer.decode(outputs[0], skip_special_tokens=False)}") + + +if __name__ == "__main__": + main() diff --git a/starcoder-main/chat/utils.py b/starcoder-main/chat/utils.py new file mode 100644 index 0000000..e17884d --- /dev/null +++ b/starcoder-main/chat/utils.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
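+
+# Shared helpers for the StarChat training scripts: a YAML-aware argument
+# parser that lets `key=value` command-line flags override values loaded from
+# a config file, and a small Hugging Face Hub login helper driven by the
+# HF_TOKEN environment variable.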
+ +import dataclasses +import os +from dataclasses import dataclass +from typing import List, Optional + +from huggingface_hub import login +from transformers import HfArgumentParser + + +class StarChatArgumentParser(HfArgumentParser): + def parse_yaml_and_args(self, yaml_arg: str, other_args: Optional[List[str]] = None) -> List[dataclass]: + arg_list = self.parse_yaml_file(os.path.abspath(yaml_arg)) + + outputs = [] + # strip other args list into dict of key-value pairs + other_args = {arg.split("=")[0].strip("-"): arg.split("=")[1] for arg in other_args} + used_args = {} + + # overwrite the default/loaded value with the value provided to the command line + # adapted from https://github.com/huggingface/transformers/blob/d0b5002378daabf62769159add3e7d66d3f83c3b/src/transformers/hf_argparser.py#L327 + for data_yaml, data_class in zip(arg_list, self.dataclass_types): + keys = {f.name for f in dataclasses.fields(data_yaml) if f.init} + inputs = {k: v for k, v in vars(data_yaml).items() if k in keys} + for arg, val in other_args.items(): + # add only if in keys + if arg in keys: + base_type = data_yaml.__dataclass_fields__[arg].type + inputs[arg] = val + + # cast type for ints, floats, and bools (default to strings) + if base_type in [int, float, bool]: + inputs[arg] = base_type(val) + + # add to used-args so we can check if double add + if arg not in used_args: + used_args[arg] = val + else: + raise ValueError(f"Duplicate argument provided: {arg}, may cause unexpected behavior") + + obj = data_class(**inputs) + outputs.append(obj) + + return outputs + + +def hf_login(): + """Login to HuggingFace Hub if HF_TOKEN is defined in the environment""" + hf_token = os.getenv("HF_TOKEN") + if hf_token is not None: + login(token=hf_token) diff --git a/starcoder-main/finetune/finetune.py b/starcoder-main/finetune/finetune.py new file mode 100644 index 0000000..96ab961 --- /dev/null +++ b/starcoder-main/finetune/finetune.py @@ -0,0 +1,312 @@ +import argparse +import os + +import torch +from accelerate import Accelerator +from datasets import load_dataset +from peft import LoraConfig, get_peft_model, prepare_model_for_int8_training, set_peft_model_state_dict +from torch.utils.data import IterableDataset +from tqdm import tqdm +from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments, logging, set_seed +from transformers import TrainerCallback, TrainingArguments, TrainerState, TrainerControl +from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR + +""" +Fine-Tune StarCoder on Code Alpaca/SE +""" + +class SavePeftModelCallback(TrainerCallback): + def on_save( + self, + args: TrainingArguments, + state: TrainerState, + control: TrainerControl, + **kwargs, + ): + checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}") + + kwargs["model"].save_pretrained(checkpoint_folder) + + pytorch_model_path = os.path.join(checkpoint_folder, "pytorch_model.bin") + torch.save({}, pytorch_model_path) + return control + + +class LoadBestPeftModelCallback(TrainerCallback): + def on_train_end( + self, + args: TrainingArguments, + state: TrainerState, + control: TrainerControl, + **kwargs, + ): + print(f"Loading best peft model from {state.best_model_checkpoint} (score: {state.best_metric}).") + best_model_path = os.path.join(state.best_model_checkpoint, "adapter_model.bin") + adapters_weights = torch.load(best_model_path) + model = kwargs["model"] + set_peft_model_state_dict(model, adapters_weights) + return control + + +def get_args(): 
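+    # CLI arguments, grouped roughly as: model/dataset selection, sequence
+    # packing (seq_length, shuffle_buffer), LoRA hyperparameters
+    # (lora_r/alpha/dropout), optimizer schedule, and logging/eval/save
+    # frequencies.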
+ parser = argparse.ArgumentParser() + parser.add_argument("--model_path", type=str, default="bigcode/large-model") + parser.add_argument("--dataset_name", type=str, default="HuggingFaceH4/CodeAlpaca_20K") + parser.add_argument("--subset", type=str) + parser.add_argument("--split", type=str) + parser.add_argument("--size_valid_set", type=int, default=10000) + parser.add_argument("--streaming", action="store_true") + parser.add_argument("--shuffle_buffer", type=int, default=5000) + + parser.add_argument("--input_column_name", type=str, default="prompt") + parser.add_argument("--output_column_name", type=str, default="completion") + + parser.add_argument("--seq_length", type=int, default=2048) + parser.add_argument("--max_steps", type=int, default=10000) + parser.add_argument("--batch_size", type=int, default=1) + parser.add_argument("--gradient_accumulation_steps", type=int, default=16) + parser.add_argument("--eos_token_id", type=int, default=49152) + + parser.add_argument("--lora_r", type=int, default=16) + parser.add_argument("--lora_alpha", type=int, default=32) + parser.add_argument("--lora_dropout", type=float, default=0.05) + + parser.add_argument("--learning_rate", type=float, default=5e-6) + parser.add_argument("--lr_scheduler_type", type=str, default="cosine") + parser.add_argument("--num_warmup_steps", type=int, default=100) + parser.add_argument("--weight_decay", type=float, default=0.05) + + parser.add_argument("--local_rank", type=int, default=0) + parser.add_argument("--no_fp16", action="store_false") + parser.add_argument("--bf16", action="store_true", default=True) + parser.add_argument("--no_gradient_checkpointing", action="store_false", default=False) + parser.add_argument("--seed", type=int, default=0) + parser.add_argument("--num_workers", type=int, default=None) + parser.add_argument("--output_dir", type=str, default="./checkpoints") + parser.add_argument("--log_freq", default=100, type=int) + parser.add_argument("--eval_freq", default=100, type=int) + parser.add_argument("--save_freq", default=1000, type=int) + + return parser.parse_args() + + +def chars_token_ratio(dataset, tokenizer, input_column_name="prompt", output_column_name="completion", nb_examples=400): + """ + Estimate the average number of characters per token in the dataset. + """ + total_characters, total_tokens = 0, 0 + for _, example in tqdm(zip(range(nb_examples), iter(dataset)), total=nb_examples): + text = prepare_sample_text(example, input_column_name, output_column_name) + total_characters += len(text) + if tokenizer.is_fast: + total_tokens += len(tokenizer(text).tokens()) + else: + total_tokens += len(tokenizer.tokenize(text)) + + return total_characters / total_tokens + + +def print_trainable_parameters(model): + """ + Prints the number of trainable parameters in the model. + """ + trainable_params = 0 + all_param = 0 + for _, param in model.named_parameters(): + all_param += param.numel() + if param.requires_grad: + trainable_params += param.numel() + print( + f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" + ) + + +def prepare_sample_text(example, input_column_name="prompt", output_column_name="completion"): + """Prepare the text from a sample of the dataset.""" + text = f"Question: {example[input_column_name]}\n\nAnswer: {example[output_column_name]}" + return text + + +class ConstantLengthDataset(IterableDataset): + """ + Iterable dataset that returns constant length chunks of tokens from stream of text files. 
+ Args: + tokenizer (Tokenizer): The processor used for proccessing the data. + dataset (dataset.Dataset): Dataset with text files. + infinite (bool): If True the iterator is reset after dataset reaches end else stops. + seq_length (int): Length of token sequences to return. + num_of_sequences (int): Number of token sequences to keep in buffer. + chars_per_token (int): Number of characters per token used to estimate number of tokens in text buffer. + """ + + def __init__( + self, + tokenizer, + dataset, + infinite=False, + seq_length=1024, + num_of_sequences=1024, + chars_per_token=3.6, + input_column_name="prompt", + output_column_name="completion" + ): + self.tokenizer = tokenizer + self.concat_token_id = tokenizer.eos_token_id if tokenizer.eos_token_id is not None else args.eos_token_id + self.dataset = dataset + self.seq_length = seq_length + self.infinite = infinite + self.current_size = 0 + self.max_buffer_size = seq_length * chars_per_token * num_of_sequences + self.input_column_name = input_column_name + self.output_column_name = output_column_name + + def __iter__(self): + iterator = iter(self.dataset) + more_examples = True + while more_examples: + buffer, buffer_len = [], 0 + while True: + if buffer_len >= self.max_buffer_size: + break + try: + buffer.append(prepare_sample_text(next(iterator), self.input_column_name, self.output_column_name)) + buffer_len += len(buffer[-1]) + except StopIteration: + if self.infinite: + iterator = iter(self.dataset) + else: + more_examples = False + break + tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"] + all_token_ids = [] + for tokenized_input in tokenized_inputs: + all_token_ids.extend(tokenized_input + [self.concat_token_id]) + for i in range(0, len(all_token_ids), self.seq_length): + input_ids = all_token_ids[i : i + self.seq_length] + if len(input_ids) == self.seq_length: + self.current_size += 1 + yield { + "input_ids": torch.LongTensor(input_ids), + "labels": torch.LongTensor(input_ids), + } + + +def create_datasets(tokenizer, args): + dataset = load_dataset( + args.dataset_name, + data_dir=args.subset, + split=args.split, + use_auth_token=True, + num_proc=args.num_workers if not args.streaming else None, + streaming=args.streaming, + ) + if args.streaming: + print("Loading the dataset in streaming mode") + valid_data = dataset.take(args.size_valid_set) + train_data = dataset.skip(args.size_valid_set) + train_data = train_data.shuffle(buffer_size=args.shuffle_buffer, seed=args.seed) + else: + train_data = dataset["train"] + valid_data = dataset["test"] + print(f"Size of the train set: {len(train_data)}. 
Size of the validation set: {len(valid_data)}") + + chars_per_token = chars_token_ratio(train_data, tokenizer, args.input_column_name, args.output_column_name) + print(f"The character to token ratio of the dataset is: {chars_per_token:.2f}") + + train_dataset = ConstantLengthDataset( + tokenizer, + train_data, + infinite=True, + seq_length=args.seq_length, + chars_per_token=chars_per_token, + input_column_name=args.input_column_name, + output_column_name=args.output_column_name + ) + valid_dataset = ConstantLengthDataset( + tokenizer, + valid_data, + infinite=False, + seq_length=args.seq_length, + chars_per_token=chars_per_token, + input_column_name=args.input_column_name, + output_column_name=args.output_column_name + ) + return train_dataset, valid_dataset + + +def run_training(args, train_data, val_data): + print("Loading the model") + # disable caching mechanism when using gradient checkpointing + model = AutoModelForCausalLM.from_pretrained( + args.model_path, + use_auth_token=True, + use_cache=not args.no_gradient_checkpointing, + load_in_8bit=True, + device_map={"": Accelerator().process_index}, + ) + model = prepare_model_for_int8_training(model) + + lora_config = LoraConfig( + r=args.lora_r, + lora_alpha=args.lora_alpha, + lora_dropout=args.lora_dropout, + bias="none", + task_type="CAUSAL_LM", + target_modules = ["c_proj", "c_attn", "q_attn"] + ) + + model = get_peft_model(model, lora_config) + + print_trainable_parameters(model) + + train_data.start_iteration = 0 + + print("Starting main loop") + + training_args = TrainingArguments( + output_dir=args.output_dir, + dataloader_drop_last=True, + evaluation_strategy="steps", + max_steps=args.max_steps, + eval_steps=args.eval_freq, + save_steps=args.save_freq, + logging_steps=args.log_freq, + per_device_train_batch_size=args.batch_size, + per_device_eval_batch_size=args.batch_size, + learning_rate=args.learning_rate, + lr_scheduler_type=args.lr_scheduler_type, + warmup_steps=args.num_warmup_steps, + gradient_accumulation_steps=args.gradient_accumulation_steps, + gradient_checkpointing=not args.no_gradient_checkpointing, + fp16=not args.no_fp16, + bf16=args.bf16, + weight_decay=args.weight_decay, + run_name="StarCoder-finetuned", + report_to="wandb", + ddp_find_unused_parameters=False, + ) + + trainer = Trainer(model=model, args=training_args, train_dataset=train_data, eval_dataset=val_data, callbacks=[SavePeftModelCallback, LoadBestPeftModelCallback]) + + print("Training...") + trainer.train() + + print("Saving last checkpoint of the model") + model.save_pretrained(os.path.join(args.output_dir, "final_checkpoint/")) + + +def main(args): + tokenizer = AutoTokenizer.from_pretrained(args.model_path, use_auth_token=True) + train_dataset, eval_dataset = create_datasets(tokenizer, args) + run_training(args, train_dataset, eval_dataset) + + +if __name__ == "__main__": + args = get_args() + + set_seed(args.seed) + os.makedirs(args.output_dir, exist_ok=True) + + logging.set_verbosity_error() + + main(args) \ No newline at end of file diff --git a/starcoder-main/finetune/merge_peft_adapters.py b/starcoder-main/finetune/merge_peft_adapters.py new file mode 100644 index 0000000..218728f --- /dev/null +++ b/starcoder-main/finetune/merge_peft_adapters.py @@ -0,0 +1,40 @@ +from transformers import AutoModelForCausalLM, AutoTokenizer +from peft import PeftModel +import torch + +import os +import argparse + +def get_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--base_model_name_or_path", type=str, default="bigcode/large-model") 
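+    # --peft_model_path should point at the saved LoRA adapter directory
+    # (e.g. the final_checkpoint/ folder written by finetune.py); the "/"
+    # default is only a placeholder and will usually need to be overridden.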
+ parser.add_argument("--peft_model_path", type=str, default="/") + parser.add_argument("--push_to_hub", action="store_true", default=True) + + return parser.parse_args() + +def main(): + args = get_args() + + base_model = AutoModelForCausalLM.from_pretrained( + args.base_model_name_or_path, + return_dict=True, + torch_dtype=torch.float16 + ) + + model = PeftModel.from_pretrained(base_model, args.peft_model_path) + model = model.merge_and_unload() + + tokenizer = AutoTokenizer.from_pretrained(args.base_model_name_or_path) + + if args.push_to_hub: + print(f"Saving to hub ...") + model.push_to_hub(f"{args.base_model_name_or_path}-merged", use_temp_dir=False, private=True) + tokenizer.push_to_hub(f"{args.base_model_name_or_path}-merged", use_temp_dir=False, private=True) + else: + model.save_pretrained(f"{args.base_model_name_or_path}-merged") + tokenizer.save_pretrained(f"{args.base_model_name_or_path}-merged") + print(f"Model saved to {args.base_model_name_or_path}-merged") + +if __name__ == "__main__" : + main() diff --git a/starcoder-main/requirements.txt b/starcoder-main/requirements.txt new file mode 100644 index 0000000..cce4192 --- /dev/null +++ b/starcoder-main/requirements.txt @@ -0,0 +1,5 @@ +tqdm==4.65.0 +transformers==4.28.1 +datasets==2.11.0 +huggingface-hub==0.13.4 +accelerate==0.18.0