fix ProcessPoolExecutor + response ray SIGSEGV bug
erictang000 committed Feb 4, 2025
1 parent 50e79a0 commit cc7dd8b
Showing 6 changed files with 15 additions and 5 deletions.
2 changes: 1 addition & 1 deletion .gitignore
@@ -165,7 +165,7 @@ cython_debug/
# Vim
*.swp

-.json
+*.json
token_usage/

run_all.sh
3 changes: 3 additions & 0 deletions skythought/skythought_evals/inference_and_check.py
@@ -91,6 +91,9 @@ def inference(llm, conversations, max_tokens, temp, args):
responses = [
Response.from_ray_response(response) for response in responses.iter_rows()
]
+import copy
+
+responses = copy.deepcopy(responses)
responses = sorted(responses, key=lambda x: x.index)
elif args.model.startswith("openai"):
fetch_partial = partial(
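The fix itself is small: the Ray-backed response rows from iter_rows() are deep-copied into plain Python objects right after collection, so the downstream ProcessPoolExecutor never pickles objects that still reference Ray-managed memory, which is what the commit message blames for the SIGSEGV. A minimal sketch of the pattern, with stand-in row dicts and a hypothetical check_response (not the repository's exact code):

import copy
from concurrent.futures import ProcessPoolExecutor

def check_response(response):
    # Stand-in for the per-response scoring/checking work done later in the script.
    return len(response["content"])

def collect_and_check(ray_rows):
    # ray_rows stands in for responses.iter_rows(); deepcopy detaches each row
    # from Ray-owned buffers so it pickles cleanly into the worker processes.
    responses = [copy.deepcopy(row) for row in ray_rows]
    responses = sorted(responses, key=lambda r: r["index"])
    with ProcessPoolExecutor() as pool:
        return list(pool.map(check_response, responses))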
6 changes: 3 additions & 3 deletions skythought/skythought_evals/ray_configs/ray_config.yaml
@@ -1,7 +1,7 @@
llm_engine: vllm # currently only vllm supported
-accelerator_type: A100-80G # accelerator name as specified here: https://docs.ray.io/en/master/ray-core/accelerator-types.html#accelerator-types
+accelerator_type: H100 # accelerator name as specified here: https://docs.ray.io/en/master/ray-core/accelerator-types.html#accelerator-types
engine_kwargs: # vllm engine kwargs
-tensor_parallel_size: 4
+tensor_parallel_size: 1
gpu_memory_utilization: 0.9
# other optional vllm engine kwargs to tune performance!
# pipeline_parallel_size: 1
@@ -19,5 +19,5 @@ runtime_env:
env_vars:
VLLM_ATTENTION_BACKEND: "FLASH_ATTN"
env_config:
-num_replicas: 2 # number of vllm replicas
+num_replicas: 8 # number of vllm replicas
batch_size: 128 # ray pipeline internal batch size (used for map_batches call internally). Should usually be set to a value in [64, 128, 256] for best performance.
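Taken together, the config changes keep the same total GPU footprint while trading tensor parallelism for more independent vLLM replicas, assuming an 8-GPU machine (the total is not stated in the diff). A quick sanity check of the arithmetic:

# GPU accounting for the old vs. new ray_config.yaml values
# (the 8-GPU total is an assumption, not stated in the diff).
old_total = 2 * 4   # num_replicas=2, tensor_parallel_size=4 -> 8 GPUs
new_total = 8 * 1   # num_replicas=8, tensor_parallel_size=1 -> 8 GPUs
assert old_total == new_total == 8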
3 changes: 2 additions & 1 deletion skythought/skythought_evals/tasks/aime/aime.yaml
@@ -5,4 +5,5 @@ question_key: problem
answer_key: answer
templating_parameters:
regular_template: "Return your final response within \\boxed{{}}. {prompt}"
-sky_template: "{prompt}\nReturn your final response within \\boxed{{}}"
+sky_template: "{prompt}\nReturn your final response within \\boxed{{}}"
+r1_template: "Please reason step by step, and put your final answer within \\boxed{{}}. {prompt}"
4 changes: 4 additions & 0 deletions skythought/skythought_evals/tasks/aime/aime_handler.py
@@ -11,6 +11,10 @@ def generate_prompt(self, problem: Dict, model):
return self.task_config.templating_parameters["sky_template"].format(
prompt=problem["problem"]
)
elif "DeepSeek-R1" in MODEL_TO_NAME[model]:
return self.task_config.templating_parameters["r1_template"].format(
prompt=problem["problem"]
)
else:
return self.task_config.templating_parameters["regular_template"].format(
prompt=problem["problem"]
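Dispatch on the model's short name is what makes the new r1_template reachable. A self-contained sketch of the selection logic, using stand-in copies of the YAML templates and the MODEL_TO_NAME entry added below (the real handler reads these from aime.yaml and model_utils.py, and also has a sky-template branch omitted here):

MODEL_TO_NAME = {
    "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B": "DeepSeek-R1-Distill-Qwen-7B",
}
templating_parameters = {
    "regular_template": "Return your final response within \\boxed{{}}. {prompt}",
    "r1_template": "Please reason step by step, and put your final answer within \\boxed{{}}. {prompt}",
}

def generate_prompt(problem, model):
    # DeepSeek-R1 distills get the R1-style instruction prefix;
    # other models fall back to the regular template.
    if "DeepSeek-R1" in MODEL_TO_NAME.get(model, ""):
        template = templating_parameters["r1_template"]
    else:
        template = templating_parameters["regular_template"]
    return template.format(prompt=problem["problem"])

print(generate_prompt({"problem": "Find x if 2x + 3 = 11."},
                      "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"))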
2 changes: 2 additions & 0 deletions skythought/skythought_evals/util/model_utils.py
@@ -52,6 +52,7 @@
"openai/o1-mini": "Question: {input}\nAnswer: ",
"openai/o1-preview": "Question: {input}\nAnswer: ",
"openai/gpt-4o-mini": "User: {input}\nPlease reason step by step, and put your final answer within \\boxed{{}}.\n\nAssistant:",
"deepseek-ai/DeepSeek-R1-Distill-Qwen-7B": "",
}

MODEL_TO_NAME = {
@@ -68,6 +69,7 @@
"openai/o1-mini": "o1-mini",
"openai/o1-preview": "o1-preview",
"openai/gpt-4o-mini": "gpt-4o-mini",
"deepseek-ai/DeepSeek-R1-Distill-Qwen-7B": "DeepSeek-R1-Distill-Qwen-7B",
}

SUBPROBLEM_SPLIT_PROMPT = """
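For illustration only, here is how the two new entries would be looked up downstream. The dict holding the per-model prompt templates is not named in this hunk, so MODEL_PROMPTS below is a placeholder; the empty string presumably means the R1 distill's input is sent without an extra wrapper, while the MODEL_TO_NAME entry supplies the short name that aime_handler.py matches against.

# Placeholder copies of the new entries; the real names and values live in model_utils.py.
MODEL_PROMPTS = {"deepseek-ai/DeepSeek-R1-Distill-Qwen-7B": ""}
MODEL_TO_NAME = {"deepseek-ai/DeepSeek-R1-Distill-Qwen-7B": "DeepSeek-R1-Distill-Qwen-7B"}

model = "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"
print(repr(MODEL_PROMPTS[model]))  # '' -> no extra prompt template for this model
print(MODEL_TO_NAME[model])        # short name used for R1 template dispatch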
