Release version 0.1.7
LyuLumos committed Oct 10, 2023
1 parent 63250c4 commit dc59bd9
Showing 3 changed files with 35 additions and 24 deletions.
15 changes: 15 additions & 0 deletions README.md
@@ -40,6 +40,17 @@ Tax: Hello! How can I assist you today?

Use `tax -h` to get more information.

### Parallel

You can use `tax --parallel` to run multiple requests concurrently. For example,
```
tax -p -i input.txt -o output.txt -k sk-xxx --option max_workers=3 --option chat_mode=openai
```
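
Here `-p` is the short form of `--parallel`, `--option max_workers=3` sets the size of the thread pool used to send requests, and `--option chat_mode=openai` routes each prompt through the OpenAI endpoint.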

Put your prompts in `input.txt`, one prompt per line. The results will be saved to `output.txt`.
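
For instance, an `input.txt` with three placeholder prompts might look like this:

```
Translate "hello world" into French.
Write a shell command that lists the five largest files in the current directory.
Explain the difference between TCP and UDP in one sentence.
```

Because results are collected as each request completes, the order of the lines in `output.txt` may not match the order of the prompts in `input.txt`.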

OpenAI has updated its API policy. Please make sure you have enough quota before running parallel requests.

## Attention

You can see a directive after the generated command that says
@@ -97,4 +108,8 @@ Please execute it or not at your own discretion. I am not responsible for the co
- Feat: Add support for **Chat** on Linux. Now you can use tax as a **ChatGPT CLI**!
- Feat: Add support for the native Anthropic Claude API on Linux Shell, Windows cmd and PowerShell.

#### 0.1.7

- Feat: Add support for parallel processing with the `openai` chat mode.

</details>
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

[project]
name = "terminal-agent-x"
version = "0.1.6"
version = "0.1.7"
authors = [
{ name="LyuLumos", email="[email protected]" },
]
42 changes: 19 additions & 23 deletions terminal_agent_x/tax.py
@@ -47,6 +47,7 @@ def kill_process_tree(pid: int) -> None:


def fetch_code(openai_key: str, model: str, prompt: str, url_option: str, chat_flag: bool) -> str:
# print(f'fetch_code has been called with {openai_key}, {model}, {prompt}, {url_option}, {chat_flag}')
url, headers, terminal_headers, data = req_info(
openai_key, model, prompt, url_option, chat_flag)
if os.name == 'nt': # Windows
@@ -63,11 +64,11 @@ def fetch_code(openai_key: str, model: str, prompt: str, url_option: str, chat_f
command = f'{command} --ipv4' if model == 'claude' else command
else: # Linux
command = f"curl -s --location '{url}' --header '{headers[0]}' --header '{headers[1]}' --data '{data}'"
print(command)
# print(command)

try:
res, err = run_command_with_timeout(command, 60)
print(res)
# print(res)
# res = os.popen(command).read().encode('utf-8').decode('utf-8', 'ignore')
if model.lower() == 'dalle':
return json.loads(res)['data'][0]['url']
@@ -175,31 +176,27 @@ def chat(openai_key: str, model: str, url_option: str):
# print(conversation)


def parallel_ask(data_prompts, chat_mode, max_workers, output_file, model, **args):
def parallel_ask(data_prompts, chat_mode, api_key, url, max_workers, output_file, model, **args):
with concurrent.futures.ThreadPoolExecutor(max_workers=int(max_workers)) as executor:
future_to_prompt = []
for prompt in data_prompts:
future_to_prompt.append(executor.submit(
chat_mode, prompt=prompt, model=model, **args))
if chat_mode == 'openai':
future_to_prompt = {executor.submit(fetch_code, **args, openai_key=api_key, url_option=url, prompt=prompt, model=model, chat_flag=False): prompt for prompt in data_prompts}
results = []
for future in concurrent.futures.as_completed(future_to_prompt):
try:
data = future.result()
except Exception as exc:
data = str(type(exc))
results.append(data)
results.append(repr(data))
if output_file:
with open(output_file, 'w', encoding='utf-8') as f:
f.write('\n'.join(results))
f.write('\n'.join((results)))
f.close()


def load_prompts_file(model, path: str) -> str:
with open(path, 'r', encoding='utf-8') as f:
text = f.readlines()
text = [line.strip() for line in text]
wrappers = [chat_data_wrapper(model, prompt, False) for prompt in text]
return wrappers
return [line.strip() for line in text]


def main() -> None:
@@ -226,7 +223,7 @@ def main() -> None:
args = parser.parse_args()

prompt = ' '.join(args.prompt)
prompt = f'{prompt}\\n{load_file(args.input)}' if args.input else prompt
prompt = f'{prompt}\\n{load_file(args.input)}' if args.input and not args.parallel else prompt

key = args.key or os.environ.get('OpenAI_KEY')
if not key:
@@ -240,20 +237,19 @@ def main() -> None:
print(res)
return

# res = get_model_response(openai_key, args.model, prompt)
res = fetch_code(key, args.model, prompt, args.url, False)

if args.option and args.parallel:
custom_options = {}
for option in args.option:
key, value = option.split('=')
custom_options[key] = value
custom_options = {option.split('=')[0]: option.split('=')[1]
for option in args.option}

parallel_ask(data_prompts=load_prompts_file(args.model, args.input), output_file=args.output, model=args.model, api_key=key, url=args.url, **custom_options)
print(f'The results have been saved to {args.output}')
return

print(parallel_ask(data_prompts=load_prompts_file(args.model, args.input), output_file=args.output, model=args.model, **custom_options))
return

# tax -i input.txt -o output.txt -m gpt-3.5-turbo -u openai_gfw -k xxx --option chat_mode=fetch_code
# res = get_model_response(openai_key, args.model, prompt)
res = fetch_code(key, args.model, prompt, args.url, False)


if args.output:
with open(args.output, 'w', encoding='utf-8') as f:
f.write(res)
