Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

1. load configure from environment 2. support anthropic base url #342

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 5 additions & 2 deletions backend/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,13 @@
# TODO: Should only be set to true when value is 'True', not any arbitrary truthy value
import os

ANTHROPIC_API_KEY = os.environ.get("ANTHROPIC_API_KEY", None)
CFG_ANTHROPIC_API_KEY = os.environ.get("ANTHROPIC_API_KEY", None)
CFG_ANTHROPIC_BASE_URL = os.environ.get("ANTHROPIC_BASE_URL", None)

# Debugging-related
CFG_OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY",None)
CFG_OPENAI_BASE_URL = os.environ.get("OPENAI_BASE_URL",None)

# Debugging-related
SHOULD_MOCK_AI_RESPONSE = bool(os.environ.get("MOCK", False))
IS_DEBUG_ENABLED = bool(os.environ.get("IS_DEBUG_ENABLED", False))
DEBUG_DIR = os.environ.get("DEBUG_DIR", "")
Expand Down
16 changes: 7 additions & 9 deletions backend/evals/core.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import os
from config import ANTHROPIC_API_KEY
from config import CFG_ANTHROPIC_API_KEY,CFG_ANTHROPIC_BASE_URL,CFG_OPENAI_API_KEY,CFG_OPENAI_BASE_URL

from llm import Llm, stream_claude_response, stream_openai_response
from prompts import assemble_prompt
Expand All @@ -8,30 +8,28 @@

async def generate_code_core(image_url: str, stack: Stack, model: Llm) -> str:
prompt_messages = assemble_prompt(image_url, stack)
openai_api_key = os.environ.get("OPENAI_API_KEY")
anthropic_api_key = ANTHROPIC_API_KEY
openai_base_url = None

async def process_chunk(content: str):
pass

if model == Llm.CLAUDE_3_SONNET:
if not anthropic_api_key:
if not CFG_ANTHROPIC_API_KEY:
raise Exception("Anthropic API key not found")

completion = await stream_claude_response(
prompt_messages,
api_key=anthropic_api_key,
api_key=CFG_ANTHROPIC_API_KEY,
callback=lambda x: process_chunk(x),
base_url=CFG_ANTHROPIC_BASE_URL
)
else:
if not openai_api_key:
if not CFG_OPENAI_API_KEY:
raise Exception("OpenAI API key not found")

completion = await stream_openai_response(
prompt_messages,
api_key=openai_api_key,
base_url=openai_base_url,
api_key=CFG_OPENAI_API_KEY,
base_url=CFG_OPENAI_BASE_URL,
callback=lambda x: process_chunk(x),
model=model,
)
Expand Down
6 changes: 4 additions & 2 deletions backend/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,9 +73,10 @@ async def stream_claude_response(
messages: List[ChatCompletionMessageParam],
api_key: str,
callback: Callable[[str], Awaitable[None]],
base_url:str|None = None
) -> str:

client = AsyncAnthropic(api_key=api_key)
client = AsyncAnthropic(api_key=api_key,base_url=base_url)

# Base parameters
model = Llm.CLAUDE_3_SONNET
Expand Down Expand Up @@ -135,9 +136,10 @@ async def stream_claude_response_native(
callback: Callable[[str], Awaitable[None]],
include_thinking: bool = False,
model: Llm = Llm.CLAUDE_3_OPUS,
base_url: str | None = None,
) -> str:

client = AsyncAnthropic(api_key=api_key)
client = AsyncAnthropic(api_key=api_key,base_url=base_url)

# Base model parameters
max_tokens = 4096
Expand Down
21 changes: 12 additions & 9 deletions backend/routes/generate_code.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
import traceback
from fastapi import APIRouter, WebSocket
import openai
from config import ANTHROPIC_API_KEY, IS_PROD, SHOULD_MOCK_AI_RESPONSE
from config import CFG_ANTHROPIC_API_KEY,CFG_ANTHROPIC_BASE_URL,CFG_OPENAI_API_KEY,CFG_OPENAI_BASE_URL, IS_PROD, SHOULD_MOCK_AI_RESPONSE
from custom_types import InputMode
from llm import (
Llm,
Expand Down Expand Up @@ -105,7 +105,7 @@ async def throw_error(
openai_api_key = params["openAiApiKey"]
print("Using OpenAI API key from client-side settings dialog")
else:
openai_api_key = os.environ.get("OPENAI_API_KEY")
openai_api_key = CFG_OPENAI_API_KEY
if openai_api_key:
print("Using OpenAI API key from environment variable")

Expand All @@ -119,7 +119,7 @@ async def throw_error(
"No OpenAI API key found. Please add your API key in the settings dialog or add it to backend/.env file. If you add it to .env, make sure to restart the backend server."
)
return

openai_api_key:str = openai_api_key
# Get the OpenAI Base URL from the request. Fall back to environment variable if not provided.
openai_base_url = None
# Disable user-specified OpenAI Base URL in prod
Expand Down Expand Up @@ -219,7 +219,7 @@ async def process_chunk(content: str):
else:
try:
if validated_input_mode == "video":
if not ANTHROPIC_API_KEY:
if not CFG_ANTHROPIC_API_KEY:
await throw_error(
"Video only works with Anthropic models. No Anthropic API key found. Please add the environment variable ANTHROPIC_API_KEY to backend/.env"
)
Expand All @@ -228,32 +228,35 @@ async def process_chunk(content: str):
completion = await stream_claude_response_native(
system_prompt=VIDEO_PROMPT,
messages=prompt_messages, # type: ignore
api_key=ANTHROPIC_API_KEY,
api_key=CFG_ANTHROPIC_API_KEY,
callback=lambda x: process_chunk(x),
model=Llm.CLAUDE_3_OPUS,
include_thinking=True,
base_url=CFG_ANTHROPIC_BASE_URL
)
exact_llm_version = Llm.CLAUDE_3_OPUS
elif code_generation_model == Llm.CLAUDE_3_SONNET:
if not ANTHROPIC_API_KEY:
if not CFG_ANTHROPIC_API_KEY:
await throw_error(
"No Anthropic API key found. Please add the environment variable ANTHROPIC_API_KEY to backend/.env"
)
raise Exception("No Anthropic key")

completion = await stream_claude_response(
prompt_messages, # type: ignore
api_key=ANTHROPIC_API_KEY,
api_key=CFG_ANTHROPIC_API_KEY,
callback=lambda x: process_chunk(x),
base_url=CFG_ANTHROPIC_BASE_URL
)
exact_llm_version = code_generation_model
else:
completion = await stream_openai_response(
prompt_messages, # type: ignore
api_key=openai_api_key,
base_url=openai_base_url,
base_url=CFG_OPENAI_BASE_URL,
callback=lambda x: process_chunk(x),
model=code_generation_model,

)
exact_llm_version = code_generation_model
except openai.AuthenticationError as e:
Expand Down Expand Up @@ -307,7 +310,7 @@ async def process_chunk(content: str):
updated_html = await generate_images(
completion,
api_key=openai_api_key,
base_url=openai_base_url,
base_url=CFG_OPENAI_BASE_URL,
image_cache=image_cache,
)
else:
Expand Down
7 changes: 4 additions & 3 deletions backend/video_to_app.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
from datetime import datetime
from prompts.claude_prompts import VIDEO_PROMPT
from utils import pprint_prompt
from config import ANTHROPIC_API_KEY
from config import CFG_ANTHROPIC_API_KEY,CFG_ANTHROPIC_BASE_URL,CFG_OPENAI_API_KEY,CFG_OPENAI_BASE_URL
from video.utils import extract_tag_content, assemble_claude_prompt_video
from llm import (
Llm,
Expand All @@ -32,7 +32,7 @@ async def main():
video_filename = "shortest.mov"
is_followup = False

if not ANTHROPIC_API_KEY:
if not CFG_ANTHROPIC_API_KEY:
raise ValueError("ANTHROPIC_API_KEY is not set")

# Get previous HTML
Expand Down Expand Up @@ -84,10 +84,11 @@ async def process_chunk(content: str):
completion = await stream_claude_response_native(
system_prompt=VIDEO_PROMPT,
messages=prompt_messages,
api_key=ANTHROPIC_API_KEY,
api_key=CFG_ANTHROPIC_API_KEY,
callback=lambda x: process_chunk(x),
model=Llm.CLAUDE_3_OPUS,
include_thinking=True,
base_url=CFG_ANTHROPIC_BASE_URL
)

end_time = time.time()
Expand Down