Commit 850402c

lower max_new_tokens, generation takes too long
RyanMarten committed Jan 19, 2025
1 parent 23cad3a commit 850402c
Showing 2 changed files with 2 additions and 2 deletions.
eval/chat_benchmarks/AIME24/eval_instruct.py (1 addition, 1 deletion)

@@ -38,7 +38,7 @@ def __init__(
         super().__init__(logger)
         self.data_file = data_file
         self.debug = debug
-        self.max_new_tokens = 16384  # Setting this high to avoid truncation for reasoning models
+        self.max_new_tokens = 2048  # lowered from 16384; generation was taking too long
 
     def generate_responses(self, model: LM) -> Dict[str, Any]:
         """
eval/chat_benchmarks/AMC23/eval_instruct.py (1 addition, 1 deletion)

@@ -39,7 +39,7 @@ def __init__(
         super().__init__(logger)
         self.data_file = data_file
         self.debug = debug
-        self.max_new_tokens = 16384  # Setting this high to avoid truncation for reasoning models
+        self.max_new_tokens = 2048  # lowered from 16384; generation was taking too long
 
     def generate_responses(self, model: LM) -> Dict[str, Any]:
         """
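For context, max_new_tokens caps how many tokens the model may generate per response, so lowering it from 16384 to 2048 bounds per-sample latency at the cost of possibly truncating long chain-of-thought outputs. Below is a minimal sketch of how such a cap typically flows into a Hugging Face-style generate() call; the model name and prompt are placeholders, not the repository's actual generate_responses implementation:

from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder model; the benchmark would use the model under evaluation.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("Solve: what is 2 + 2?", return_tensors="pt")

# generate() stops after at most max_new_tokens new tokens: a lower cap
# (2048) bounds generation time, but can cut off long reasoning traces,
# which is the trade-off this commit makes.
outputs = model.generate(**inputs, max_new_tokens=2048)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))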
