diff --git a/README.md b/README.md
index 0e56ef1..6b847fd 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,7 @@
 - Install ollama
 - `ollama run codellama` (first time and then you can just have application in background)
 - There are probably other dependencies which I forgot to put in setup.py sorry in advance.
-- run with `ducky --file <file> --prompt <prompt> (optional)`
+- Run with `ducky -f <file>`
 
 ## Why did I make this
 
@@ -22,12 +22,25 @@ You will need Ollama installed on your machine. The model I use for this project
 For the first installation you can run `ollama run codellama` and it should pull the necessary binaries for you. Ollama is also great because it'll spin up a server which can run in the background and can even do automatic model switching as long as you have it installed.
 
 ## Usage
-Make sure you have the package installed. Easiest through [pypi](https://pypi.org/project/rubber-ducky/).
-`pip install rubber-ducky` also works.
+Install through [PyPI](https://pypi.org/project/rubber-ducky/):
 
-To run:
+`pip install rubber-ducky`
 
-`ducky --file <file> --prompt <prompt> (optional)`
+### Simple run
+`ducky`
 
-I have yet to implement some methods so if you do something I don't say that's on you.
+### Additional options
+
+`ducky --file <file> --prompt <prompt> --directory <directory> --chain --model <model>`
+
+Where:
+- `--prompt` or `-p`: Custom prompt to be used
+- `--file` or `-f`: The file to be processed
+- `--directory` or `-d`: The directory to be processed
+- `--chain` or `-c`: Chain the output of the previous command to the next command
+- `--model` or `-m`: The model to be used (default is "codellama")
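+
+For example, to review one file with a custom prompt and keep the conversation going afterwards (the file name here is only illustrative):
+
+`ducky -f my_script.py -p "Focus on error handling:" -c`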
+
+## Example output
+![Screenshot of ducky](image.png)
\ No newline at end of file
diff --git a/ducky/ducky.py b/ducky/ducky.py
index fc37d99..0b2fb52 100644
--- a/ducky/ducky.py
+++ b/ducky/ducky.py
@@ -2,43 +2,95 @@
 from typing import Optional
 from langchain.llms.ollama import Ollama
 from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
+from termcolor import colored
+import os
 
+
+class RubberDuck:
+    """
+    A thin wrapper around the Ollama model that keeps the system prompt
+    and model choice in one place.
+    """
+    def __init__(self, model: str = "codellama") -> None:
+        """
+        Initializes the RubberDuck class.
+
+        Args:
+            model (str, optional): The model to be used. Defaults to "codellama".
+        """
+        self.system_prompt = """You are a pair programming tool to help developers debug, think through design, and write code.
+                            Help the user think through their approach and provide feedback on the code."""
+        self.llm = Ollama(model=model, callbacks=[StreamingStdOutCallbackHandler()], system=self.system_prompt)
+
+    def call_llama(self, code: str = "", prompt: Optional[str] = None, chain: bool = False) -> None:
+        """
+        Calls the Ollama model to provide feedback on the given code.
+
+        Args:
+            code (str): The code to be reviewed.
+            prompt (Optional[str]): Custom prompt to be used. Defaults to None.
+            chain (bool): Keep prompting interactively after the first response. Defaults to False.
+        """
+        if prompt is None:
+            prompt = "review the code, find any issues if any, suggest cleanups if any:\n" + code
+        else:
+            prompt = prompt + "\n" + code
 
-def call_llama(code: str, prompt: Optional[str] = None) -> None:
+        self.llm(prompt)
+        if chain:
+            while True:
+                prompt = input(colored("\n What's on your mind? \n ", 'green'))
+                self.llm(prompt)
+
+
+def read_files_from_dir(directory: str) -> str:
     """
-    This function calls the Ollama model to provide feedback on the given code.
+    Reads every file in a directory and returns their contents as one string.
 
     Args:
-        code (str): The code to be reviewed.
-        prompt (Optional[str], optional): Custom prompt to be used. Defaults to None.
-    """
-    if prompt is None:
-        prompt = "review the code, find any issues if any, suggest cleanups if any:" + code
-    else:
-        prompt = prompt + code
-    system_prompt = """You are a pair progamming tool to help developers debug, think through design, and write code.
-                        Help the user rubber duck by providing feedback on the code."""
+        directory (str): The directory to be processed.
 
-    # TODO: find out how to enable the python trained model
-    llm = Ollama(model="codellama", callbacks=[StreamingStdOutCallbackHandler()], system=system_prompt)
+    Returns:
+        str: The concatenated contents of all the files.
+    """
+    code = ""
+    for file_name in os.listdir(directory):
+        path = os.path.join(directory, file_name)
+        if os.path.isfile(path):  # skip subdirectories
+            with open(path) as f:
+                code += f.read()
+    return code
 
-    # TODO: add chaining if it makes sense
-    llm(prompt)
 
 def ducky() -> None:
     """
     This function parses the command line arguments and calls the Ollama model.
     """
     parser = argparse.ArgumentParser()
-    parser.add_argument("--prompt", help="Custom prompt to be used", default=None)
-    parser.add_argument("--file", help="The file to be processed", default=None)
-    parser.add_argument("--directory", help="The directory to be processed", default=None)
+    parser.add_argument("--prompt", "-p", help="Custom prompt to be used", default=None)
+    parser.add_argument("--file", "-f", help="The file to be processed", default=None)
+    parser.add_argument("--directory", "-d", help="The directory to be processed", default=None)
+    parser.add_argument("--chain", "-c", help="Chain the output of the previous command to the next command", action="store_true", default=False)
+    parser.add_argument("--model", "-m", help="The model to be used", default="codellama")
     args, _ = parser.parse_known_args()
 
+    # Testing has shown that codellama:7b-python is good at returning Python code;
+    # the intent of this tool is more general feedback and a back-and-forth with the user.
+    rubber_ducky = RubberDuck(model=args.model)
+
+    if args.file is None and args.directory is None:
+        # Interactive mode; call_llama runs the follow-up loop itself when --chain is set.
+        prompt = input(colored("\n What's on your mind? \n ", 'green'))
+        rubber_ducky.call_llama(prompt=prompt, chain=args.chain)
+
     if args.file is not None:
-        code = open(args.file).read()
-        call_llama(code=code, prompt=args.prompt)
-    else:
-        raise Exception("No file provided")
+        with open(args.file) as f:
+            code = f.read()
+        rubber_ducky.call_llama(code=code, prompt=args.prompt, chain=args.chain)
+
+    elif args.directory is not None:
+        code = read_files_from_dir(args.directory)
+        rubber_ducky.call_llama(code=code, prompt=args.prompt, chain=args.chain)
+
 
 if __name__ == "__main__":
     ducky()
diff --git a/image.png b/image.png
new file mode 100644
index 0000000..0b00209
Binary files /dev/null and b/image.png differ
diff --git a/setup.py b/setup.py
index f2134a7..e84caf8 100644
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@
 setup(
     name='rubber-ducky',
-    version='1.0.1',
+    version='1.1.0',
     description='AI Companion for Pair Programming',
     long_description=long_description,
     long_description_content_type='text/markdown',
@@ -16,6 +16,7 @@
     packages=find_packages(),
     install_requires=[
         'langchain',
+        'termcolor'
     ],
     entry_points={
         'console_scripts': [
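Note for reviewers: beyond the CLI, the new `RubberDuck` class can be driven directly from Python. A minimal sketch under stated assumptions — the import path follows the `ducky/ducky.py` layout above, `my_script.py` and `src` are placeholder names, and Ollama must be running with `codellama` already pulled:

```python
from ducky.ducky import RubberDuck, read_files_from_dir

# Stream a review of a single file to stdout with the default model.
duck = RubberDuck(model="codellama")
with open("my_script.py") as f:  # placeholder file name
    duck.call_llama(code=f.read())

# Review every file in a directory, then stay in the same interactive
# follow-up loop that --chain enables on the CLI.
duck.call_llama(code=read_files_from_dir("src"), chain=True)
```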