diff --git a/backend/config.py b/backend/config.py
index 05592b03..1a0aae1a 100644
--- a/backend/config.py
+++ b/backend/config.py
@@ -3,10 +3,13 @@
 # TODO: Should only be set to true when value is 'True', not any abitrary truthy value
 import os
 
-ANTHROPIC_API_KEY = os.environ.get("ANTHROPIC_API_KEY", None)
+CFG_ANTHROPIC_API_KEY = os.environ.get("ANTHROPIC_API_KEY", None)
+CFG_ANTHROPIC_BASE_URL = os.environ.get("ANTHROPIC_BASE_URL", None)
 
-# Debugging-related
+CFG_OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", None)
+CFG_OPENAI_BASE_URL = os.environ.get("OPENAI_BASE_URL", None)
+# Debugging-related
 SHOULD_MOCK_AI_RESPONSE = bool(os.environ.get("MOCK", False))
 IS_DEBUG_ENABLED = bool(os.environ.get("IS_DEBUG_ENABLED", False))
 DEBUG_DIR = os.environ.get("DEBUG_DIR", "")
 
diff --git a/backend/evals/core.py b/backend/evals/core.py
index 5e053628..5f41f1a2 100644
--- a/backend/evals/core.py
+++ b/backend/evals/core.py
@@ -1,5 +1,5 @@
 import os
-from config import ANTHROPIC_API_KEY
+from config import CFG_ANTHROPIC_API_KEY, CFG_ANTHROPIC_BASE_URL, CFG_OPENAI_API_KEY, CFG_OPENAI_BASE_URL
 from llm import Llm, stream_claude_response, stream_openai_response
 from prompts import assemble_prompt
 
@@ -8,30 +8,28 @@
 async def generate_code_core(image_url: str, stack: Stack, model: Llm) -> str:
     prompt_messages = assemble_prompt(image_url, stack)
 
-    openai_api_key = os.environ.get("OPENAI_API_KEY")
-    anthropic_api_key = ANTHROPIC_API_KEY
-    openai_base_url = None
 
     async def process_chunk(content: str):
         pass
 
     if model == Llm.CLAUDE_3_SONNET:
-        if not anthropic_api_key:
+        if not CFG_ANTHROPIC_API_KEY:
             raise Exception("Anthropic API key not found")
 
         completion = await stream_claude_response(
             prompt_messages,
-            api_key=anthropic_api_key,
+            api_key=CFG_ANTHROPIC_API_KEY,
             callback=lambda x: process_chunk(x),
+            base_url=CFG_ANTHROPIC_BASE_URL,
         )
     else:
-        if not openai_api_key:
+        if not CFG_OPENAI_API_KEY:
             raise Exception("OpenAI API key not found")
 
         completion = await stream_openai_response(
             prompt_messages,
-            api_key=openai_api_key,
-            base_url=openai_base_url,
+            api_key=CFG_OPENAI_API_KEY,
+            base_url=CFG_OPENAI_BASE_URL,
             callback=lambda x: process_chunk(x),
             model=model,
         )
diff --git a/backend/llm.py b/backend/llm.py
index e5410463..cdcb0307 100644
--- a/backend/llm.py
+++ b/backend/llm.py
@@ -73,9 +73,10 @@ async def stream_claude_response(
     messages: List[ChatCompletionMessageParam],
     api_key: str,
     callback: Callable[[str], Awaitable[None]],
+    base_url: str | None = None,
 ) -> str:
 
-    client = AsyncAnthropic(api_key=api_key)
+    client = AsyncAnthropic(api_key=api_key, base_url=base_url)
 
     # Base parameters
     model = Llm.CLAUDE_3_SONNET
@@ -135,9 +136,10 @@ async def stream_claude_response_native(
     callback: Callable[[str], Awaitable[None]],
     include_thinking: bool = False,
     model: Llm = Llm.CLAUDE_3_OPUS,
+    base_url: str | None = None,
 ) -> str:
 
-    client = AsyncAnthropic(api_key=api_key)
+    client = AsyncAnthropic(api_key=api_key, base_url=base_url)
 
     # Base model parameters
     max_tokens = 4096
diff --git a/backend/routes/generate_code.py b/backend/routes/generate_code.py
index e7186fcd..e2eb8f91 100644
--- a/backend/routes/generate_code.py
+++ b/backend/routes/generate_code.py
@@ -2,7 +2,7 @@
 import traceback
 from fastapi import APIRouter, WebSocket
 import openai
-from config import ANTHROPIC_API_KEY, IS_PROD, SHOULD_MOCK_AI_RESPONSE
+from config import CFG_ANTHROPIC_API_KEY, CFG_ANTHROPIC_BASE_URL, CFG_OPENAI_API_KEY, IS_PROD, SHOULD_MOCK_AI_RESPONSE
 from custom_types import InputMode
 from llm import (
     Llm,
@@ -105,7 +105,7 @@ async def throw_error(
         openai_api_key = params["openAiApiKey"]
         print("Using OpenAI API key from client-side settings dialog")
     else:
-        openai_api_key = os.environ.get("OPENAI_API_KEY")
+        openai_api_key = CFG_OPENAI_API_KEY
         if openai_api_key:
             print("Using OpenAI API key from environment variable")
 
@@ -219,7 +219,7 @@ async def process_chunk(content: str):
     else:
         try:
             if validated_input_mode == "video":
-                if not ANTHROPIC_API_KEY:
+                if not CFG_ANTHROPIC_API_KEY:
                     await throw_error(
                         "Video only works with Anthropic models. No Anthropic API key found. Please add the environment variable ANTHROPIC_API_KEY to backend/.env"
                     )
@@ -228,14 +228,15 @@ async def process_chunk(content: str):
                 completion = await stream_claude_response_native(
                     system_prompt=VIDEO_PROMPT,
                     messages=prompt_messages,  # type: ignore
-                    api_key=ANTHROPIC_API_KEY,
+                    api_key=CFG_ANTHROPIC_API_KEY,
                     callback=lambda x: process_chunk(x),
                     model=Llm.CLAUDE_3_OPUS,
                     include_thinking=True,
+                    base_url=CFG_ANTHROPIC_BASE_URL,
                 )
                 exact_llm_version = Llm.CLAUDE_3_OPUS
             elif code_generation_model == Llm.CLAUDE_3_SONNET:
-                if not ANTHROPIC_API_KEY:
+                if not CFG_ANTHROPIC_API_KEY:
                     await throw_error(
                         "No Anthropic API key found. Please add the environment variable ANTHROPIC_API_KEY to backend/.env"
                     )
@@ -243,17 +244,18 @@
 
                 completion = await stream_claude_response(
                     prompt_messages,  # type: ignore
-                    api_key=ANTHROPIC_API_KEY,
+                    api_key=CFG_ANTHROPIC_API_KEY,
                     callback=lambda x: process_chunk(x),
+                    base_url=CFG_ANTHROPIC_BASE_URL,
                 )
                 exact_llm_version = code_generation_model
             else:
                 completion = await stream_openai_response(
                     prompt_messages,  # type: ignore
                     api_key=openai_api_key,
                     base_url=openai_base_url,
                     callback=lambda x: process_chunk(x),
                     model=code_generation_model,
                 )
                 exact_llm_version = code_generation_model
 
diff --git a/backend/video_to_app.py b/backend/video_to_app.py
index c876804c..3395535b 100644
--- a/backend/video_to_app.py
+++ b/backend/video_to_app.py
@@ -14,7 +14,7 @@
 from datetime import datetime
 from prompts.claude_prompts import VIDEO_PROMPT
 from utils import pprint_prompt
-from config import ANTHROPIC_API_KEY
+from config import CFG_ANTHROPIC_API_KEY, CFG_ANTHROPIC_BASE_URL
 from video.utils import extract_tag_content, assemble_claude_prompt_video
 from llm import (
     Llm,
@@ -32,7 +32,7 @@ async def main():
     video_filename = "shortest.mov"
     is_followup = False
 
-    if not ANTHROPIC_API_KEY:
+    if not CFG_ANTHROPIC_API_KEY:
         raise ValueError("ANTHROPIC_API_KEY is not set")
 
     # Get previous HTML
@@ -84,10 +84,11 @@ async def process_chunk(content: str):
     completion = await stream_claude_response_native(
         system_prompt=VIDEO_PROMPT,
         messages=prompt_messages,
-        api_key=ANTHROPIC_API_KEY,
+        api_key=CFG_ANTHROPIC_API_KEY,
         callback=lambda x: process_chunk(x),
         model=Llm.CLAUDE_3_OPUS,
        include_thinking=True,
+        base_url=CFG_ANTHROPIC_BASE_URL,
     )
 
     end_time = time.time()