From 5656676a75d166de71cf86334397af05db170dec Mon Sep 17 00:00:00 2001 From: wunderwuzzi23 Date: Sat, 1 Jun 2024 17:00:34 -0700 Subject: [PATCH] add groq support --- README.md | 12 ++++++++++++ prompt.txt | 47 ++++++++++++++++++++++------------------------- requirements.txt | 1 + yolo.py | 25 +++++++++++++++---------- yolo.yaml | 14 +++++++------- 5 files changed, 57 insertions(+), 42 deletions(-) diff --git a/README.md b/README.md index edaecc1..a0e4b45 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,13 @@ ![Animated GIF](https://github.com/wunderwuzzi23/blog/raw/master/static/images/2023/yolo-shell-anim-gif.gif) +# Update Yolo v0.4 - Support for Groq + +* Added groq support. You can get an API key at `https://console.groq.com` and set `model` to, for instance, `llama3-8b-8192`. groq is lightning fast. +* Simplified and improved the default `prompt.txt`. +* Note: Testing shows that model `gpt-4o` gives the best results. + + # Update Yolo v0.3 - Support for Azure OpenAI * Key changes are upgrades to the latest OpenAI libraries and support for Azure OpenAI. There is an `api` key in the `yolo.yaml` that can be set to `azure_openai` and then you can provide all the parameters accordingly in the yaml file as well (`api-version`, your `azure-endpoint`,...). The api key for azure is called `AZURE_OPENAI_API_KEY` by the way. It can be set via environment variable and config file. @@ -60,6 +67,11 @@ There are three ways to configure the key on Linux and macOS: - Create a file at `~/.azureopenai.apikey` with the key in it - Set the key in the `yolo.yaml` configuration file +### Groq Configuration +- Grab an API key from `console.groq.com` +- You can either `export GROQ_API_KEY=`, or have a `.env` file in the same directory as `yolo.py` with `GROQ_API_KEY=""` as a line +- Set `api` and `model` (e.g. llama3-8b-8192) in the `yolo.yaml` configuration file + ## Aliases To set the alias, like `yolo` or `computer` on each login, add them to .bash_aliases (or .zshrc on macOS) file. 
Make sure the path is the one you want to use. diff --git a/prompt.txt b/prompt.txt index 54ca639..0575506 100644 --- a/prompt.txt +++ b/prompt.txt @@ -1,29 +1,26 @@ -Act as a natural language to {shell} command translation engine on {os}. You are an expert in {shell} on {os} and translate the question at the end to valid syntax. +You are Yolo, a natural language to {shell} command translation engine for {os}. You are an expert in {shell} on {os} and translate the question at the end to valid command line syntax. -Follow these rules: -Construct valid {shell} command that solve the question -Leverage help and man pages to ensure valid syntax and an optimal solution -Be concise -Just show the commands -Return only plaintext -Only show a single answer, but you can always chain commands together -Think step by step -Only create valid syntax (you can use comments if it makes sense) -If python is installed you can use it to solve problems -if python3 is installed you can use it to solve problems -Even if there is a lack of details, attempt to find the most logical solution by going about it step by step -Do not return multiple solutions -Do not show html, styled, colored formatting -Do not creating invalid syntax -Do not add unnecessary text in the response -Do not add notes or intro sentences -Do not show multiple distinct solutions to the question -Do not add explanations on what the commands do -Do not return what the question was -Do not repeat or paraphrase the question in your response -Do not cause syntax errors -Do not rush to a conclusion +Rules: +* No code style markdown output, ever. 
+* Construct valid {shell} command to solve the question +* Leverage help and man pages to ensure valid syntax and an optimal solution +* Be concise, think step by step, and show just final commands in plain text +* Only show a single answer, but you can always chain commands together +* Create valid syntax of {shell} on {os}, include comments if useful +* If python or python3 is installed you can use it to solve problems +* Even if there is a lack of details, find the most logical solution by going about it step by step +* Do not return multiple solutions +* Do not show html, styled, colored formatting +* Do not create invalid syntax or cause syntax errors +* Do not add unnecessary text in the response +* Do not add notes or intro sentences +* Do not show multiple distinct solutions to the question +* Do not add explanations on what the commands do +* Do not return what the question was +* Do not repeat or paraphrase the question in your response +* Do not rush to a conclusion +* Never start a response with ``` -Follow all of the above rules. This is important you MUST follow the above rules. There are no exceptions to these rules. You must always follow them. No exceptions. +Follow the above rules. There are no exceptions to these rules. 
Question: diff --git a/requirements.txt b/requirements.txt index c6c20bc..1fb2f8b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,3 +5,4 @@ python-dotenv==1.0.1 distro==1.9.0 PyYAML==6.0.1 pyperclip==1.8.2 +groq==0.5.0 \ No newline at end of file diff --git a/yolo.py b/yolo.py index 4440c10..4e35b0a 100755 --- a/yolo.py +++ b/yolo.py @@ -8,6 +8,7 @@ import platform from openai import OpenAI from openai import AzureOpenAI +from groq import Groq import sys import subprocess import dotenv @@ -51,7 +52,7 @@ def get_full_prompt(user_prompt, shell): return prompt def print_usage(config): - print("Yolo v0.3 - by @wunderwuzzi23") + print("Yolo v0.4 - by @wunderwuzzi23 (June 1, 2024)") print() print("Usage: yolo [-a] list the current directory information") print("Argument: -a: Prompt the user before running the command (only useful when safety is off)") @@ -67,10 +68,8 @@ def print_usage(config): def get_os_friendly_name(): - - # Get OS Name os_name = platform.system() - + if os_name == "Linux": return "Linux/"+distro.name(pretty=True) elif os_name == "Windows": @@ -80,7 +79,6 @@ def get_os_friendly_name(): else: return os_name - def create_client(config): dotenv.load_dotenv() @@ -107,21 +105,28 @@ def create_client(config): api_key = api_key return OpenAI(api_key=api_key) + + if config["api"] == "groq": + api_key = os.getenv("GROQ_API_KEY") + if not api_key: + api_key=config["groq_api_key"] + + return Groq(api_key=api_key) -def call_open_ai(client, query, config, shell): +def chat_completion(client, query, config, shell): # do we have a prompt from the user? 
if query == "": print ("No user prompt specified.") sys.exit(-1) - # Load the correct prompt based on Shell and OS and append the user's prompt + # Load prompt based on Shell and OS and append the user's prompt prompt = get_full_prompt(query, shell) - # Make the first line also the system prompt + # Make the first line the system prompt system_prompt = prompt.split('\n')[0] #print(prompt) - # Call the API + # Call the Model API response = client.chat.completions.create( model=config["model"], messages=[ @@ -232,7 +237,7 @@ def main(): user_prompt = " ".join(arguments) ## core prompting loop logic - result = call_open_ai(client, user_prompt, config, shell) + result = chat_completion(client, user_prompt, config, shell) check_for_issue(result) check_for_markdown(result) diff --git a/yolo.yaml b/yolo.yaml index 7bf60b6..979780b 100644 --- a/yolo.yaml +++ b/yolo.yaml @@ -1,7 +1,5 @@ -api: openai # Set to azure_openai if you want to use Azure OpenAI API - -# If you have access update to gpt-4 or gpt-4-turbo-preview, if Azure this is the deployment name -model: gpt-4-turbo-preview +api: groq # openai, azure_openai, groq +model: gpt-4o # if azure_openai this is the deployment name, for groq, e.g llama3-8b-8192 # Azure specific (only needed if api: azure-openai) azure_endpoint: https://.openai.azure.com @@ -11,11 +9,13 @@ azure_api_version: 2024-02-15-preview temperature: 0 max_tokens: 500 -safety: True # Safety: If set to False, commands returned from the AI will be run *without* prompting the user. -modify: False # Enable prompt modify feature +safety: True # Safety: If set to False, commands from LLM run *without* prompting the user. 
+modify: False # Enable prompt modify feature suggested_command_color: blue # Suggested Command Color -# API Keys (optional): The key can aso be provided via environment variable (OPENAI_API_KEY, AZURE_OPENAI_API_KEY), .env, or ~/.openai.apikey file +# API Keys (optional): Preferred to use environment variables +# OPENAI_API_KEY, AZURE_OPENAI_API_KEY or GROQ_API_KEY (.env file is also supported) azure_openai_api_key: openai_api_key: +groq_api_key: