From dc59bd921e7cd39002e05c8c8b9dd5e185a552f4 Mon Sep 17 00:00:00 2001
From: NineOceans <44770303+LyuLumos@users.noreply.github.com>
Date: Tue, 10 Oct 2023 18:50:28 +0000
Subject: [PATCH] Release version 0.1.7

---
 README.md               | 15 +++++++++++++++
 pyproject.toml          |  2 +-
 terminal_agent_x/tax.py | 42 +++++++++++++++++++----------------------
 3 files changed, 35 insertions(+), 24 deletions(-)

diff --git a/README.md b/README.md
index 915a644..6c8000f 100644
--- a/README.md
+++ b/README.md
@@ -40,6 +40,17 @@ Tax: Hello! How can I assist you today?
 
 Use `tax -h` to get more information.
 
+### Parallel
+
+You can use `tax --parallel` to send multiple prompts at the same time. For example:
+```
+tax -p -i input.txt -o output.txt -k sk-xxx --option max_workers=3 --option chat_mode=openai
+```
+
+Put your prompts in `input.txt`, one prompt per line. The results will be saved to `output.txt`.
+
+OpenAI has updated its API policy. Please make sure you have enough quota before running.
+
 ## Attention
 
 You can see a directive after the generated command that says
@@ -97,4 +108,8 @@ Please execute it or not at your own discretion. I am not responsible for the co
 - Feat: Add support for **Chat** on Linux. Now you can use tax as **ChatGPT CLI**!
 - Feat: Add support for native Anthropic Claude API on Linux Shell, Windows cmd and Powershell.
 
+#### 0.1.7
+
+- Feat: Add support for parallel processing with the `openai` chat mode.
+ 
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
index 0115f9d..6128dad 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "terminal-agent-x"
-version = "0.1.6"
+version = "0.1.7"
 authors = [
   { name="LyuLumos", email="lyujiuyang0@gmail.com" },
 ]
diff --git a/terminal_agent_x/tax.py b/terminal_agent_x/tax.py
index 03f132c..e360e1e 100644
--- a/terminal_agent_x/tax.py
+++ b/terminal_agent_x/tax.py
@@ -47,6 +47,7 @@ def kill_process_tree(pid: int) -> None:
 
 
 def fetch_code(openai_key: str, model: str, prompt: str, url_option: str, chat_flag: bool) -> str:
+    # print(f'fetch_code has been called with {openai_key}, {model}, {prompt}, {url_option}, {chat_flag}')
     url, headers, terminal_headers, data = req_info(
         openai_key, model, prompt, url_option, chat_flag)
     if os.name == 'nt':  # Windows
@@ -63,11 +64,11 @@ def fetch_code(openai_key: str, model: str, prompt: str, url_option: str, chat_f
         command = f'{command} --ipv4' if model == 'claude' else command
     else:  # Linux
         command = f"curl -s --location '{url}' --header '{headers[0]}' --header '{headers[1]}' --data '{data}'"
-    print(command)
+    # print(command)
 
     try:
         res, err = run_command_with_timeout(command, 60)
-        print(res)
+        # print(res)
         # res = os.popen(command).read().encode('utf-8').decode('utf-8', 'ignore')
         if model.lower() == 'dalle':
             return json.loads(res)['data'][0]['url']
@@ -175,31 +176,27 @@ def chat(openai_key: str, model: str, url_option: str):
     # print(conversation)
 
 
-def parallel_ask(data_prompts, chat_mode, max_workers, output_file, model, **args):
+def parallel_ask(data_prompts, chat_mode, api_key, url, max_workers, output_file, model, **args):
     with concurrent.futures.ThreadPoolExecutor(max_workers=int(max_workers)) as executor:
-        future_to_prompt = []
-        for prompt in data_prompts:
-            future_to_prompt.append(executor.submit(
-                chat_mode, prompt=prompt, model=model, **args))
+        if chat_mode == 'openai':
+            future_to_prompt = {executor.submit(fetch_code, **args, openai_key=api_key, url_option=url, prompt=prompt, model=model, chat_flag=False): prompt for prompt in data_prompts}
         results = []
         for future in concurrent.futures.as_completed(future_to_prompt):
             try:
                 data = future.result()
             except Exception as exc:
                 data = str(type(exc))
-            results.append(data)
+            results.append(repr(data))
     if output_file:
         with open(output_file, 'w', encoding='utf-8') as f:
-            f.write('\n'.join(results))
+            f.write('\n'.join((results)))
             f.close()
 
 
 def load_prompts_file(model, path: str) -> str:
     with open(path, 'r', encoding='utf-8') as f:
         text = f.readlines()
-        text = [line.strip() for line in text]
-        wrappers = [chat_data_wrapper(model, prompt, False) for prompt in text]
-        return wrappers
+        return [line.strip() for line in text]
 
 
 def main() -> None:
@@ -226,7 +223,7 @@ def main() -> None:
     args = parser.parse_args()
 
     prompt = ' '.join(args.prompt)
-    prompt = f'{prompt}\\n{load_file(args.input)}' if args.input else prompt
+    prompt = f'{prompt}\\n{load_file(args.input)}' if args.input and not args.parallel else prompt
 
     key = args.key or os.environ.get('OpenAI_KEY')
     if not key:
@@ -240,20 +237,19 @@ def main() -> None:
         print(res)
         return
 
-    # res = get_model_response(openai_key, args.model, prompt)
-    res = fetch_code(key, args.model, prompt, args.url, False)
-
     if args.option and args.parallel:
-        custom_options = {}
-        for option in args.option:
-            key, value = option.split('=')
-            custom_options[key] = value
+        custom_options = {option.split('=')[0]: option.split('=')[1]
+                          for option in args.option}
+
+        parallel_ask(data_prompts=load_prompts_file(args.model, args.input), output_file=args.output, model=args.model, api_key=key, url=args.url, **custom_options)
+        print(f'The results have been saved to {args.output}')
+        return
 
-        print(parallel_ask(data_prompts=load_prompts_file(args.model, args.input), output_file=args.output, model=args.model, **custom_options))
-        return
-    # tax -i input.txt -o output.txt -m gpt-3.5-turbo -u openai_gfw -k xxx --option chat_mode=fetch_code
+    # res = get_model_response(openai_key, args.model, prompt)
+    res = fetch_code(key, args.model, prompt, args.url, False)
 
+    if args.output:
         with open(args.output, 'w', encoding='utf-8') as f:
             f.write(res)
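
For context on the diff above: the new `parallel_ask` follows a standard `ThreadPoolExecutor` fan-out, submitting one request per prompt and gathering results with `as_completed`. Below is a minimal, self-contained sketch of that pattern, not part of the patch; `ask` is a placeholder standing in for the real `fetch_code` call, and the output path is hard-coded for illustration. As in the patch, results are collected in completion order rather than input order.

```python
# Minimal sketch of the ThreadPoolExecutor fan-out used by parallel_ask.
# `ask` is a placeholder for the real per-prompt request (fetch_code in tax.py).
import concurrent.futures


def ask(prompt: str) -> str:
    # Placeholder: pretend every prompt yields a one-line answer.
    return f'answer for: {prompt}'


def parallel_ask_sketch(prompts, max_workers=3, output_file='output.txt'):
    results = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=int(max_workers)) as executor:
        # Submit one job per prompt, then collect results as they finish.
        futures = [executor.submit(ask, p) for p in prompts]
        for future in concurrent.futures.as_completed(futures):
            try:
                results.append(future.result())
            except Exception as exc:  # one failed prompt should not abort the batch
                results.append(repr(exc))
    # Write one result per line, mirroring the one-prompt-per-line input format.
    with open(output_file, 'w', encoding='utf-8') as f:
        f.write('\n'.join(results))


if __name__ == '__main__':
    parallel_ask_sketch(['prompt one', 'prompt two', 'prompt three'])
```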