Docker Compose Fixes (#2939)
kevinlu1248 authored Jan 12, 2024
1 parent 177f4f7 commit 742e317
Showing 3 changed files with 1 addition and 17 deletions.
2 changes: 0 additions & 2 deletions docker-compose.yml
@@ -15,6 +15,4 @@ services:
       sh -c ". bin/startup.sh"
     stdin_open: true
     tty: true
-    environment:
-      - PORT=${PORT:-8080}
     restart: unless-stopped
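Note: with the environment block gone, the compose file no longer forwards PORT into the container, so the listening port presumably falls back to whatever bin/startup.sh or the application itself defaults to. A purely illustrative Python sketch of that kind of in-process fallback (the name PORT and the 8080 default are taken from the removed lines; nothing below is code from this repository):

import os

# Hypothetical fallback inside the application: read PORT from the
# environment and default to 8080 when it is not set, mirroring the
# ${PORT:-8080} expansion that was removed from docker-compose.yml.
PORT = int(os.environ.get("PORT", "8080"))
print(f"listening on port {PORT}")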
3 changes: 0 additions & 3 deletions sweepai/config/server.py
@@ -123,9 +123,6 @@
 )
 
 
-OPENAI_DO_HAVE_32K_MODEL_ACCESS = (
-    os.environ.get("OPENAI_DO_HAVE_32K_MODEL_ACCESS", "true").lower() == "true"
-)
 OPENAI_USE_3_5_MODEL_ONLY = (
     os.environ.get("OPENAI_USE_3_5_MODEL_ONLY", "false").lower() == "true"
 )
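For context, the flag that survives in server.py uses the same parsing pattern as the one removed above: the environment variable is read as a string, lower-cased, and compared against "true". A small standalone sketch of that pattern (the name and default are copied from the hunk; the surrounding script is illustrative only):

import os

# Boolean-style feature flag: any value other than "true" (case-insensitive),
# including an unset variable combined with this default, yields False.
OPENAI_USE_3_5_MODEL_ONLY = (
    os.environ.get("OPENAI_USE_3_5_MODEL_ONLY", "false").lower() == "true"
)

if __name__ == "__main__":
    print(f"OPENAI_USE_3_5_MODEL_ONLY={OPENAI_USE_3_5_MODEL_ONLY}")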
13 changes: 1 addition & 12 deletions sweepai/core/chat.py
@@ -10,7 +10,6 @@
 from sweepai.config.server import (
     DEFAULT_GPT35_MODEL,
     OPENAI_API_KEY,
-    OPENAI_DO_HAVE_32K_MODEL_ACCESS,
     OPENAI_USE_3_5_MODEL_ONLY,
 )
 from sweepai.core.entities import Message
@@ -64,9 +63,7 @@ class ChatGPT(BaseModel):
         )
     ]
     prev_message_states: list[list[Message]] = []
-    model: ChatModel = (
-        "gpt-4-32k-0613" if OPENAI_DO_HAVE_32K_MODEL_ACCESS else "gpt-4-0613"
-    )
+    model: ChatModel = "gpt-4-0613"
     chat_logger: ChatLogger | None
     human_message: HumanMessagePrompt | None = None
     file_change_paths: list[str] = []
@@ -242,14 +239,6 @@ def call_openai(
         max_tokens = (
             model_to_max_tokens[model] - int(messages_length) - gpt_4_buffer
         )  # this is for the function tokens
-        if (
-            model_to_max_tokens[model] - int(messages_length) - gpt_4_buffer < 3000
-            and not OPENAI_DO_HAVE_32K_MODEL_ACCESS
-        ):  # use 16k if it's OOC and no 32k
-            model = DEFAULT_GPT35_MODEL
-            max_tokens = (
-                model_to_max_tokens[model] - int(messages_length) - gpt_4_buffer
-            )
         max_tokens = min(max_tokens, 4096)
         max_tokens = (
             min(requested_max_tokens, max_tokens)
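For context, here is a minimal standalone sketch of the token-budget arithmetic that remains in call_openai after the 32k fallback branch is deleted. Only the variable names and the 4096 cap come from the hunk above; the concrete numbers and the dictionary contents are illustrative assumptions, and the real expression continues past the lines visible here:

# Assumed example values; the real ones live in sweepai/core/chat.py.
model_to_max_tokens = {"gpt-4-0613": 8192}  # assumed context-window entry
gpt_4_buffer = 800                          # assumed safety buffer
messages_length = 5000                      # assumed prompt length in tokens
requested_max_tokens = 2048                 # assumed caller request

model = "gpt-4-0613"
# Budget left for the completion after the prompt and the buffer
# ("this is for the function tokens" in the original comment).
max_tokens = model_to_max_tokens[model] - int(messages_length) - gpt_4_buffer
# Clamp to the per-response ceiling, then to the caller's request.
max_tokens = min(max_tokens, 4096)
max_tokens = min(requested_max_tokens, max_tokens)
print(max_tokens)  # 2048 with the assumed numbers above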

0 comments on commit 742e317
