
Dev (#2940)
# Purpose

Miscellaneous dev fixes: capture full tracebacks on OpenAI retries, watchdog the webhook worker thread, move the default chat model to `gpt-4-1106-preview`, chunk oversized assistant prompts, and bound the stale-PR cleanup to a single page of results.

# Changes Made

1. `sweepai/agents/assistant_wrapper.py`: log failed retries with `logger.exception` so the traceback is recorded, not just the message.
2. `sweepai/api.py`: start a `delayed_kill` watchdog thread alongside the webhook worker thread.
3. `sweepai/core/chat.py`: default `ChatGPT.model` to `gpt-4-1106-preview` and delete the commented-out GPT-4 fallback logic.
4. `sweepai/core/context_pruning.py`: split long user prompts into `MAX_CHARS`-sized chunks with `textwrap.wrap` and post each chunk as its own thread message.
5. `sweepai/handlers/on_ticket.py`: in `delete_old_prs`, iterate only the first page of pull requests via `prs.get_page(0)` instead of a manual 40-PR counter.
6. `sweepai/utils/github_utils.py`: cast `installation_id` to `int` before the negative-ID check in `get_token`.
7. `sweepai/utils/openai_proxy.py`: route `gpt-4-1106-preview` to the GPT-4 engine in `determine_openai_engine`.
8. `sweepai/utils/ticket_utils.py`: make `fire_and_forget_wrapper` invoke the call synchronously; the old thread-spawning path is kept as comments.

# Additional Notes

When you make a PR, please ping us on Discord at http://discord.gg/sweep.

---------

Co-authored-by: Kevin Lu <[email protected]>
wwzeng1 and kevinlu1248 authored Jan 13, 2024
1 parent 742e317 commit 3408bf0
Showing 8 changed files with 25 additions and 27 deletions.
2 changes: 1 addition & 1 deletion sweepai/agents/assistant_wrapper.py
```diff
@@ -42,7 +42,7 @@ def openai_retry_with_timeout(call, *args, num_retries=3, timeout=5, **kwargs):
         try:
             return call(*args, **kwargs, timeout=timeout)
         except Exception as e:
-            logger.error(f"Retry {attempt + 1} failed with error: {e}")
+            logger.exception(f"Retry {attempt + 1} failed with error: {e}")
             error_message = str(e)
     raise Exception(
         f"Maximum retries reached. The call failed for call {error_message}"
```
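`logger.exception` logs at error level and also records the active exception's traceback, which `logger.error` alone does not. A minimal illustration with loguru:

```python
from loguru import logger

try:
    raise ValueError("boom")
except Exception as e:
    logger.error(f"Retry 1 failed with error: {e}")      # message only
    logger.exception(f"Retry 1 failed with error: {e}")  # message + full traceback
```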
2 changes: 2 additions & 0 deletions sweepai/api.py
```diff
@@ -967,6 +967,8 @@ def worker_wrapper():

     thread = threading.Thread(target=worker_wrapper)
     thread.start()
+    thread_killer = threading.Thread(target=delayed_kill, args=(thread,))
+    thread_killer.start()
     return {"success": True}
```
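A second thread now watches the worker. `delayed_kill` itself is not part of this diff, so the sketch below is a hypothetical stand-in that waits out a grace period and reports a still-running worker; note that CPython offers no safe way to forcibly kill a thread, so real implementations typically set a shared flag or raise an exception into the thread instead.

```python
import threading

def delayed_kill(thread: threading.Thread, timeout_seconds: float = 60 * 60) -> None:
    # Hypothetical watchdog: block until the worker exits or the grace
    # period lapses, then report the hang so it can be acted on.
    thread.join(timeout_seconds)
    if thread.is_alive():
        print(f"worker {thread.name!r} still running after {timeout_seconds}s")
```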
7 changes: 1 addition & 6 deletions sweepai/core/chat.py
```diff
@@ -63,7 +63,7 @@ class ChatGPT(BaseModel):
         )
     ]
     prev_message_states: list[list[Message]] = []
-    model: ChatModel = "gpt-4-0613"
+    model: ChatModel = "gpt-4-1106-preview"
     chat_logger: ChatLogger | None
     human_message: HumanMessagePrompt | None = None
     file_change_paths: list[str] = []
@@ -368,11 +368,6 @@ async def acall_openai(
         messages_dicts.append(message_dict)

     gpt_4_buffer = 800
-    # if int(messages_length) + gpt_4_buffer < 6000 and model == DEFAULT_GPT4_32K_MODEL:
-    #     model = "gpt-4-0613"
-    # max_tokens = (
-    #     model_to_max_tokens[model] - int(messages_length) - gpt_4_buffer
-    # ) # this is for the function tokens
     if "gpt-4" in model:
         max_tokens = min(max_tokens, 4096)
     # Fix for self hosting where TPM limit is super low for GPT-4
```
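`gpt-4-1106-preview` accepts a much larger context window than `gpt-4-0613` but caps each completion at 4,096 tokens, which is why the surviving code clamps `max_tokens` for any GPT-4 model. A minimal sketch of that clamp in isolation (the function name is illustrative):

```python
def clamp_completion_tokens(model: str, requested_max_tokens: int) -> int:
    # gpt-4-1106-preview reads up to ~128k context tokens but will not
    # generate more than 4096 tokens per completion, so cap the request.
    if "gpt-4" in model:
        return min(requested_max_tokens, 4096)
    return requested_max_tokens

print(clamp_completion_tokens("gpt-4-1106-preview", 8192))  # 4096
print(clamp_completion_tokens("gpt-3.5-turbo", 8192))       # 8192
```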
16 changes: 10 additions & 6 deletions sweepai/core/context_pruning.py
```diff
@@ -1,12 +1,14 @@
 import json
 import re
+import textwrap
 import time

 from attr import dataclass
 from loguru import logger
 from openai.types.beta.thread import Thread
 from openai.types.beta.threads.run import Run

+from sweepai.agents.assistant_function_modify import MAX_CHARS
 from sweepai.agents.assistant_wrapper import client, openai_retry_with_timeout
 from sweepai.config.server import IS_SELF_HOSTED
 from sweepai.core.entities import Snippet
@@ -275,6 +277,7 @@ def get_relevant_context(
         unformatted_user_prompt=unformatted_user_prompt,
         query=query,
     )
+    messages = textwrap.wrap(user_prompt, MAX_CHARS)
     assistant = openai_retry_with_timeout(
         client.beta.assistants.create,
         name="Relevant Files Assistant",
@@ -283,12 +286,13 @@ def get_relevant_context(
         model=model,
     )
     thread = openai_retry_with_timeout(client.beta.threads.create)
-    _ = openai_retry_with_timeout(
-        client.beta.threads.messages.create,
-        thread.id,
-        role="user",
-        content=f"{user_prompt}",
-    )
+    for content in messages:
+        _ = openai_retry_with_timeout(
+            client.beta.threads.messages.create,
+            thread.id,
+            role="user",
+            content=content,
+        )
     run = openai_retry_with_timeout(
         client.beta.threads.runs.create,
         thread_id=thread.id,
```
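Long prompts are now chunked with `textwrap.wrap` and posted as multiple user messages, presumably to stay under the Assistants API's per-message length limit. A small demonstration of the chunking behavior, with an illustrative limit:

```python
import textwrap

MAX_CHARS = 20  # illustrative; the real limit is imported from assistant_function_modify

prompt = "one two three four five six seven"
chunks = textwrap.wrap(prompt, MAX_CHARS)
print(chunks)  # ['one two three four', 'five six seven']

# Caveat: wrap() breaks on whitespace and, by default, collapses newlines,
# so code-heavy prompts lose their original line structure across chunks.
```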
6 changes: 1 addition & 5 deletions sweepai/handlers/on_ticket.py
```diff
@@ -356,19 +356,15 @@ def delete_old_prs():
             direction="desc",
             base=SweepConfig.get_branch(repo),
         )
-        checked_pr_count = 0
-        for pr in tqdm(prs):
+        for pr in tqdm(prs.get_page(0)):
             # # Check if this issue is mentioned in the PR, and pr is owned by bot
             # # This is done in create_pr, (pr_description = ...)
-            if checked_pr_count >= 40:
-                break
             if (
                 pr.user.login == CURRENT_USERNAME
                 and f"Fixes #{issue_number}.\n" in pr.body
             ):
                 success = safe_delete_sweep_branch(pr, repo)
                 break
-            checked_pr_count += 1

     fire_and_forget_wrapper(delete_old_prs)()
```
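The manual 40-PR counter is replaced by PyGithub pagination: `PaginatedList.get_page(0)` fetches just the first page of results (30 per page by default), so the cleanup loop is bounded by a single API request instead of walking the full PR history. A sketch of the pattern (token and repository name are placeholders):

```python
from github import Github  # PyGithub

repo = Github("ghp_example_token").get_repo("owner/repo")  # placeholders
prs = repo.get_pulls(state="open", sort="created", direction="desc")

# Iterating `prs` directly pages lazily through *all* pull requests;
# get_page(0) issues one request and returns only the first page.
for pr in prs.get_page(0):
    print(pr.number, pr.title)
```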
2 changes: 1 addition & 1 deletion sweepai/utils/github_utils.py
```diff
@@ -44,7 +44,7 @@ def get_jwt():


 def get_token(installation_id: int):
-    if installation_id < 0:
+    if int(installation_id) < 0:
         return os.environ["GITHUB_PAT"]
     for timeout in [5.5, 5.5, 10.5]:
         try:
```
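Python type hints are not enforced at runtime, so `installation_id` can arrive as a string (e.g. from a webhook payload), and comparing a string to an int raises `TypeError` in Python 3; the `int(...)` cast makes the sentinel check safe either way:

```python
installation_id = "-1"  # webhook payloads may deliver numeric fields as strings

# installation_id < 0      -> TypeError: '<' not supported between 'str' and 'int'
print(int(installation_id) < 0)  # True, as intended
```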
1 change: 1 addition & 0 deletions sweepai/utils/openai_proxy.py
```diff
@@ -114,6 +114,7 @@ def determine_openai_engine(self, model):
         elif (
             model == "gpt-4"
             or model == "gpt-4-0613"
+            or model == "gpt-4-1106-preview"
             and OPENAI_API_ENGINE_GPT4 is not None
         ):
             engine = OPENAI_API_ENGINE_GPT4
```
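One subtlety when reading this condition: `and` binds tighter than `or` in Python, so the expression parses as `A or B or (C and D)` — the `OPENAI_API_ENGINE_GPT4 is not None` guard applies only to the `gpt-4-1106-preview` arm. A quick demonstration:

```python
model = "gpt-4"
OPENAI_API_ENGINE_GPT4 = None

# Parses as: A or B or (C and D), because `and` binds tighter than `or`.
taken = (
    model == "gpt-4"
    or model == "gpt-4-0613"
    or model == "gpt-4-1106-preview"
    and OPENAI_API_ENGINE_GPT4 is not None
)
print(taken)  # True: the branch is entered even though the engine is unset
```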
16 changes: 8 additions & 8 deletions sweepai/utils/ticket_utils.py
```diff
@@ -1,5 +1,4 @@
 import traceback
-from threading import Thread
 from time import time

 from loguru import logger
@@ -244,13 +243,14 @@ def fire_and_forget_wrapper(call):
     """

     def wrapper(*args, **kwargs):
-        def run_in_thread(call, *a, **kw):
-            try:
-                call(*a, **kw)
-            except:
-                pass
+        return call(*args, **kwargs)
+        # def run_in_thread(call, *a, **kw):
+        #     try:
+        #         call(*a, **kw)
+        #     except:
+        #         pass

-        thread = Thread(target=run_in_thread, args=(call,) + args, kwargs=kwargs)
-        thread.start()
+        # thread = Thread(target=run_in_thread, args=(call,) + args, kwargs=kwargs)
+        # thread.start()

     return wrapper
```
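After this change `fire_and_forget_wrapper` is fire-and-forget in name only: the wrapped call runs synchronously on the caller's thread, with the old `Thread`-spawning path preserved as comments. A minimal sketch of the resulting behavior, using only what the diff shows:

```python
def fire_and_forget_wrapper(call):
    def wrapper(*args, **kwargs):
        # Runs inline; exceptions now propagate to the caller instead of
        # being swallowed inside a background thread.
        return call(*args, **kwargs)
    return wrapper

def greet(name: str) -> str:
    return f"hello {name}"

print(fire_and_forget_wrapper(greet)("sweep"))  # hello sweep
```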
