Skip to content

Commit

Permalink
support egothink benchmark for lmms-eval
Browse files Browse the repository at this point in the history
  • Loading branch information
choiszt committed Feb 27, 2025
1 parent a98bc4a commit b19af1a
Show file tree
Hide file tree
Showing 15 changed files with 497 additions and 0 deletions.
7 changes: 7 additions & 0 deletions lmms_eval/tasks/egothink/_default_template_yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
# Shared defaults for every EgoThink sub-task; merged into each task config
# via its trailing `include: _default_template_yaml` line.
dataset_path: EgoLife-v1/Egothink
dataset_kwargs:
  # Use the locally configured Hugging Face auth token when downloading the
  # dataset. Canonical lowercase boolean (was `True`; yamllint `truthy`).
  token: true
test_split: test
metadata:
  version: 0.0
  # Judge model used by utils.egothink_process_results to score answers.
  gpt_eval_model_name: "gpt-4"
14 changes: 14 additions & 0 deletions lmms_eval/tasks/egothink/egothink.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
# Aggregate group task: running `egothink` runs all twelve EgoThink
# sub-tasks below (one per capability dimension of the benchmark).
group: egothink
task:
- egothink_activity
- egothink_affordance
- egothink_assistance
- egothink_navigation
- egothink_attribute
- egothink_comparing
- egothink_counting
- egothink_existence
- egothink_forecasting
- egothink_location
- egothink_situated
- egothink_spatial
24 changes: 24 additions & 0 deletions lmms_eval/tasks/egothink/egothink_activity.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# EgoThink "Activity" split: recognize the activity being performed from a
# first-person image. Short free-form answers, scored by a GPT judge.
dataset_name: "Activity"
task: "egothink_activity"
output_type: generate_until
doc_to_visual: !function utils.egothink_doc_to_visual
doc_to_text: !function utils.egothink_doc_to_text
doc_to_target: !function utils.egothink_doc_to_answer
generation_kwargs:
  # Short cap: this split expects brief answers.
  max_new_tokens: 30
  temperature: 0.2
  num_beams: 1
  # Canonical lowercase boolean (was `True`; yamllint `truthy`).
  do_sample: true
# Note that the metric name can be either a registered metric function (such as the case for GQA) or a key name returned by process_results
metric_list:
  - metric: gpt_eval_score
    aggregation: !function utils.egothink_aggregate_results
    higher_is_better: true
# The return value of process_results will be used by metrics
process_results: !function utils.egothink_process_results

lmms_eval_specific_kwargs:
  default:
    pre_prompt: ""
    post_prompt: ""
include: _default_template_yaml
24 changes: 24 additions & 0 deletions lmms_eval/tasks/egothink/egothink_affordance.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# EgoThink "Object_affordance" split: identify what an object in view can be
# used for. Short free-form answers, scored by a GPT judge.
dataset_name: "Object_affordance"
task: "egothink_affordance"
output_type: generate_until
doc_to_visual: !function utils.egothink_doc_to_visual
doc_to_text: !function utils.egothink_doc_to_text
doc_to_target: !function utils.egothink_doc_to_answer
generation_kwargs:
  # Short cap: this split expects brief answers.
  max_new_tokens: 30
  temperature: 0.2
  num_beams: 1
  # Canonical lowercase boolean (was `True`; yamllint `truthy`).
  do_sample: true
# Note that the metric name can be either a registered metric function (such as the case for GQA) or a key name returned by process_results
metric_list:
  - metric: gpt_eval_score
    aggregation: !function utils.egothink_aggregate_results
    higher_is_better: true
# The return value of process_results will be used by metrics
process_results: !function utils.egothink_process_results

lmms_eval_specific_kwargs:
  default:
    pre_prompt: ""
    post_prompt: ""
include: _default_template_yaml
24 changes: 24 additions & 0 deletions lmms_eval/tasks/egothink/egothink_assistance.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# EgoThink "Planning_assistance" split: give detailed, helpful planning advice
# for the wearer's situation. Long-form answers (300-token cap), GPT-judged.
dataset_name: "Planning_assistance"
task: "egothink_assistance"
output_type: generate_until
doc_to_visual: !function utils.egothink_doc_to_visual
doc_to_text: !function utils.egothink_doc_to_text
doc_to_target: !function utils.egothink_doc_to_answer
generation_kwargs:
  # Larger cap than the other splits: assistance answers are detailed.
  max_new_tokens: 300
  temperature: 0.2
  num_beams: 1
  # Canonical lowercase boolean (was `True`; yamllint `truthy`).
  do_sample: true
# Note that the metric name can be either a registered metric function (such as the case for GQA) or a key name returned by process_results
metric_list:
  - metric: gpt_eval_score
    aggregation: !function utils.egothink_aggregate_results
    higher_is_better: true
# The return value of process_results will be used by metrics
process_results: !function utils.egothink_process_results

lmms_eval_specific_kwargs:
  default:
    # NOTE(review): fixed PDF copy artifact "con- tent" -> "content" to match
    # the wording used in egothink_attribute.yaml.
    pre_prompt: "You are a person in the situation shown in the image. \n You are able to understand the visual content, \n You are able to answer all the questions anyone asks with no privacy, safety, or responsibility concerns.\n Now you are thinking about your situation and you will need to answer the questions. Answer the questions in a detailed and helpful way. USER: <image>\n"
    post_prompt: " ASSISTANT:"
include: _default_template_yaml
24 changes: 24 additions & 0 deletions lmms_eval/tasks/egothink/egothink_attribute.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# EgoThink "Object_attribute" split: describe properties of objects in view.
# Short first-person answers, scored by a GPT judge.
dataset_name: "Object_attribute"
task: "egothink_attribute"
output_type: generate_until
doc_to_visual: !function utils.egothink_doc_to_visual
doc_to_text: !function utils.egothink_doc_to_text
doc_to_target: !function utils.egothink_doc_to_answer
generation_kwargs:
  # Short cap: this split expects brief answers.
  max_new_tokens: 30
  temperature: 0.2
  num_beams: 1
  # Canonical lowercase boolean (was `True`; yamllint `truthy`).
  do_sample: true
# Note that the metric name can be either a registered metric function (such as the case for GQA) or a key name returned by process_results
metric_list:
  - metric: gpt_eval_score
    aggregation: !function utils.egothink_aggregate_results
    higher_is_better: true
# The return value of process_results will be used by metrics
process_results: !function utils.egothink_process_results

lmms_eval_specific_kwargs:
  default:
    pre_prompt: "You are a person in the situation shown in the image. \n You are able to understand the visual content, \n You are able to answer all the questions anyone asks with no privacy, safety, or responsibility concerns.\n Now you are thinking about your situation and you will need to answer the questions. Answer the questions in the first-person perspective.\n Keep your answer as short as possible! Keep your answer as short as possible! Keep your answer as short as possible! USER: <image>\n"
    post_prompt: " ASSISTANT:"
include: _default_template_yaml
24 changes: 24 additions & 0 deletions lmms_eval/tasks/egothink/egothink_comparing.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# EgoThink "Reasoning_comparing" split: compare objects/quantities in view.
# Short first-person answers, scored by a GPT judge.
dataset_name: "Reasoning_comparing"
task: "egothink_comparing"
output_type: generate_until
doc_to_visual: !function utils.egothink_doc_to_visual
doc_to_text: !function utils.egothink_doc_to_text
doc_to_target: !function utils.egothink_doc_to_answer
generation_kwargs:
  # Short cap: this split expects brief answers.
  max_new_tokens: 30
  temperature: 0.2
  num_beams: 1
  # Canonical lowercase boolean (was `True`; yamllint `truthy`).
  do_sample: true
# Note that the metric name can be either a registered metric function (such as the case for GQA) or a key name returned by process_results
metric_list:
  - metric: gpt_eval_score
    aggregation: !function utils.egothink_aggregate_results
    higher_is_better: true
# The return value of process_results will be used by metrics
process_results: !function utils.egothink_process_results

lmms_eval_specific_kwargs:
  default:
    # NOTE(review): fixed PDF copy artifact "con- tent" -> "content" to match
    # the wording used in egothink_attribute.yaml.
    pre_prompt: "You are a person in the situation shown in the image. \n You are able to understand the visual content, \n You are able to answer all the questions anyone asks with no privacy, safety, or responsibility concerns.\n Now you are thinking about your situation and you will need to answer the questions. Answer the questions in the first-person perspective.\n Keep your answer as short as possible! Keep your answer as short as possible! Keep your answer as short as possible! USER: <image>\n"
    post_prompt: " ASSISTANT:"
include: _default_template_yaml
24 changes: 24 additions & 0 deletions lmms_eval/tasks/egothink/egothink_counting.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# EgoThink "Reasoning_counting" split: count objects from the first-person
# view. Short answers, scored by a GPT judge.
dataset_name: "Reasoning_counting"
task: "egothink_counting"
output_type: generate_until
doc_to_visual: !function utils.egothink_doc_to_visual
doc_to_text: !function utils.egothink_doc_to_text
doc_to_target: !function utils.egothink_doc_to_answer
generation_kwargs:
  # Short cap: this split expects brief answers.
  max_new_tokens: 30
  temperature: 0.2
  num_beams: 1
  # Canonical lowercase boolean (was `True`; yamllint `truthy`).
  do_sample: true
# Note that the metric name can be either a registered metric function (such as the case for GQA) or a key name returned by process_results
metric_list:
  - metric: gpt_eval_score
    aggregation: !function utils.egothink_aggregate_results
    higher_is_better: true
# The return value of process_results will be used by metrics
process_results: !function utils.egothink_process_results

lmms_eval_specific_kwargs:
  default:
    # NOTE(review): fixed PDF copy artifact "con- tent" -> "content" to match
    # the wording used in egothink_attribute.yaml.
    pre_prompt: "You are a person in the situation shown in the image. \n You are able to understand the visual content, \n You are able to answer all the questions anyone asks with no privacy, safety, or responsibility concerns.\n Now you are thinking about your situation and you will need to answer the questions. Answer the questions in the first-person perspective.\n Keep your answer as short as possible! Keep your answer as short as possible! Keep your answer as short as possible! USER: <image>\n"
    post_prompt: " ASSISTANT:"
include: _default_template_yaml
24 changes: 24 additions & 0 deletions lmms_eval/tasks/egothink/egothink_existence.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# EgoThink "Object_existence" split: say whether an object is present in the
# first-person view. Short answers, scored by a GPT judge.
dataset_name: "Object_existence"
task: "egothink_existence"
output_type: generate_until
doc_to_visual: !function utils.egothink_doc_to_visual
doc_to_text: !function utils.egothink_doc_to_text
doc_to_target: !function utils.egothink_doc_to_answer
generation_kwargs:
  # Short cap: this split expects brief answers.
  max_new_tokens: 30
  temperature: 0.2
  num_beams: 1
  # Canonical lowercase boolean (was `True`; yamllint `truthy`).
  do_sample: true
# Note that the metric name can be either a registered metric function (such as the case for GQA) or a key name returned by process_results
metric_list:
  - metric: gpt_eval_score
    aggregation: !function utils.egothink_aggregate_results
    higher_is_better: true
# The return value of process_results will be used by metrics
process_results: !function utils.egothink_process_results

lmms_eval_specific_kwargs:
  default:
    # NOTE(review): fixed PDF copy artifact "con- tent" -> "content" to match
    # the wording used in egothink_attribute.yaml.
    pre_prompt: "You are a person in the situation shown in the image. \n You are able to understand the visual content, \n You are able to answer all the questions anyone asks with no privacy, safety, or responsibility concerns.\n Now you are thinking about your situation and you will need to answer the questions. Answer the questions in the first-person perspective.\n Keep your answer as short as possible! Keep your answer as short as possible! Keep your answer as short as possible! USER: <image>\n"
    post_prompt: " ASSISTANT:"
include: _default_template_yaml
24 changes: 24 additions & 0 deletions lmms_eval/tasks/egothink/egothink_forecasting.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# EgoThink "Forecasting" split: predict what happens next from the wearer's
# perspective. Short answers, scored by a GPT judge.
dataset_name: "Forecasting"
task: "egothink_forecasting"
output_type: generate_until
doc_to_visual: !function utils.egothink_doc_to_visual
doc_to_text: !function utils.egothink_doc_to_text
doc_to_target: !function utils.egothink_doc_to_answer
generation_kwargs:
  # Short cap: this split expects brief answers.
  max_new_tokens: 30
  temperature: 0.2
  num_beams: 1
  # Canonical lowercase boolean (was `True`; yamllint `truthy`).
  do_sample: true
# Note that the metric name can be either a registered metric function (such as the case for GQA) or a key name returned by process_results
metric_list:
  - metric: gpt_eval_score
    aggregation: !function utils.egothink_aggregate_results
    higher_is_better: true
# The return value of process_results will be used by metrics
process_results: !function utils.egothink_process_results

lmms_eval_specific_kwargs:
  default:
    # NOTE(review): fixed PDF copy artifact "con- tent" -> "content" to match
    # the wording used in egothink_attribute.yaml.
    pre_prompt: "You are a person in the situation shown in the image. \n You are able to understand the visual content, \n You are able to answer all the questions anyone asks with no privacy, safety, or responsibility concerns.\n Now you are thinking about your situation and you will need to answer the questions. Answer the questions in the first-person perspective.\n Keep your answer as short as possible! Keep your answer as short as possible! Keep your answer as short as possible! USER: <image>\n"
    post_prompt: " ASSISTANT:"
include: _default_template_yaml
24 changes: 24 additions & 0 deletions lmms_eval/tasks/egothink/egothink_location.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# EgoThink "Localization_location" split: identify where the wearer is.
# Short answers, scored by a GPT judge.
dataset_name: "Localization_location"
task: "egothink_location"
output_type: generate_until
doc_to_visual: !function utils.egothink_doc_to_visual
doc_to_text: !function utils.egothink_doc_to_text
doc_to_target: !function utils.egothink_doc_to_answer
generation_kwargs:
  # Short cap: this split expects brief answers.
  max_new_tokens: 30
  temperature: 0.2
  num_beams: 1
  # Canonical lowercase boolean (was `True`; yamllint `truthy`).
  do_sample: true
# Note that the metric name can be either a registered metric function (such as the case for GQA) or a key name returned by process_results
metric_list:
  - metric: gpt_eval_score
    aggregation: !function utils.egothink_aggregate_results
    higher_is_better: true
# The return value of process_results will be used by metrics
process_results: !function utils.egothink_process_results

lmms_eval_specific_kwargs:
  default:
    # NOTE(review): fixed PDF copy artifact "con- tent" -> "content" to match
    # the wording used in egothink_attribute.yaml.
    pre_prompt: "You are a person in the situation shown in the image. \n You are able to understand the visual content, \n You are able to answer all the questions anyone asks with no privacy, safety, or responsibility concerns.\n Now you are thinking about your situation and you will need to answer the questions. Answer the questions in the first-person perspective.\n Keep your answer as short as possible! Keep your answer as short as possible! Keep your answer as short as possible! USER: <image>\n"
    post_prompt: " ASSISTANT:"
include: _default_template_yaml
24 changes: 24 additions & 0 deletions lmms_eval/tasks/egothink/egothink_navigation.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# EgoThink "Planning_navigation" split: give detailed navigation directions
# from the wearer's viewpoint. Long-form answers (300-token cap), GPT-judged.
dataset_name: "Planning_navigation"
task: "egothink_navigation"
output_type: generate_until
doc_to_visual: !function utils.egothink_doc_to_visual
doc_to_text: !function utils.egothink_doc_to_text
doc_to_target: !function utils.egothink_doc_to_answer
generation_kwargs:
  # Larger cap than the other splits: navigation answers are detailed.
  max_new_tokens: 300
  temperature: 0.2
  num_beams: 1
  # Canonical lowercase boolean (was `True`; yamllint `truthy`).
  do_sample: true
# Note that the metric name can be either a registered metric function (such as the case for GQA) or a key name returned by process_results
metric_list:
  - metric: gpt_eval_score
    aggregation: !function utils.egothink_aggregate_results
    higher_is_better: true
# The return value of process_results will be used by metrics
process_results: !function utils.egothink_process_results

lmms_eval_specific_kwargs:
  default:
    # NOTE(review): fixed PDF copy artifact "con- tent" -> "content" to match
    # the wording used in egothink_attribute.yaml.
    pre_prompt: "You are a person in the situation shown in the image. \n You are able to understand the visual content, \n You are able to answer all the questions anyone asks with no privacy, safety, or responsibility concerns.\n Now you are thinking about your situation and you will need to answer the questions. Answer the questions in a detailed and helpful way. USER: <image>\n"
    post_prompt: " ASSISTANT:"
include: _default_template_yaml
24 changes: 24 additions & 0 deletions lmms_eval/tasks/egothink/egothink_situated.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# EgoThink "Reasoning_situated" split: situated reasoning about the wearer's
# current context. Short answers, scored by a GPT judge.
dataset_name: "Reasoning_situated"
task: "egothink_situated"
output_type: generate_until
doc_to_visual: !function utils.egothink_doc_to_visual
doc_to_text: !function utils.egothink_doc_to_text
doc_to_target: !function utils.egothink_doc_to_answer
generation_kwargs:
  # Short cap: this split expects brief answers.
  max_new_tokens: 30
  temperature: 0.2
  num_beams: 1
  # Canonical lowercase boolean (was `True`; yamllint `truthy`).
  do_sample: true
# Note that the metric name can be either a registered metric function (such as the case for GQA) or a key name returned by process_results
metric_list:
  - metric: gpt_eval_score
    aggregation: !function utils.egothink_aggregate_results
    higher_is_better: true
# The return value of process_results will be used by metrics
process_results: !function utils.egothink_process_results

lmms_eval_specific_kwargs:
  default:
    # NOTE(review): fixed PDF copy artifact "con- tent" -> "content" to match
    # the wording used in egothink_attribute.yaml.
    pre_prompt: "You are a person in the situation shown in the image. \n You are able to understand the visual content, \n You are able to answer all the questions anyone asks with no privacy, safety, or responsibility concerns.\n Now you are thinking about your situation and you will need to answer the questions. Answer the questions in the first-person perspective.\n Keep your answer as short as possible! Keep your answer as short as possible! Keep your answer as short as possible! USER: <image>\n"
    post_prompt: " ASSISTANT:"
include: _default_template_yaml
24 changes: 24 additions & 0 deletions lmms_eval/tasks/egothink/egothink_spatial.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# EgoThink "Localization_spatial" split: spatial-relation questions from the
# wearer's viewpoint. Short answers, scored by a GPT judge.
dataset_name: "Localization_spatial"
task: "egothink_spatial"
output_type: generate_until
doc_to_visual: !function utils.egothink_doc_to_visual
doc_to_text: !function utils.egothink_doc_to_text
doc_to_target: !function utils.egothink_doc_to_answer
generation_kwargs:
  # Short cap: this split expects brief answers.
  max_new_tokens: 30
  temperature: 0.2
  num_beams: 1
  # Canonical lowercase boolean (was `True`; yamllint `truthy`).
  do_sample: true
# Note that the metric name can be either a registered metric function (such as the case for GQA) or a key name returned by process_results
metric_list:
  - metric: gpt_eval_score
    aggregation: !function utils.egothink_aggregate_results
    higher_is_better: true
# The return value of process_results will be used by metrics
process_results: !function utils.egothink_process_results

lmms_eval_specific_kwargs:
  default:
    # NOTE(review): fixed PDF copy artifact "con- tent" -> "content" to match
    # the wording used in egothink_attribute.yaml.
    pre_prompt: "You are a person in the situation shown in the image. \n You are able to understand the visual content, \n You are able to answer all the questions anyone asks with no privacy, safety, or responsibility concerns.\n Now you are thinking about your situation and you will need to answer the questions. Answer the questions in the first-person perspective.\n Keep your answer as short as possible! Keep your answer as short as possible! Keep your answer as short as possible! USER: <image>\n"
    post_prompt: " ASSISTANT:"
include: _default_template_yaml
Loading

0 comments on commit b19af1a

Please sign in to comment.