
Commit 82bc69e

Update instructions for ScienceQA

1 parent 37586d4

5 files changed: +476 additions, −3 deletions

.gitignore
Lines changed: 1 addition & 0 deletions

@@ -6,6 +6,7 @@ dist
 
 # Log
 *.log
+*.log.*
 *.json
 
 # Data

README.md
Lines changed: 51 additions & 1 deletion

@@ -174,7 +174,57 @@ python summarize_gpt_review.py
 
 ### ScienceQA
 
-Please see ScienceQA [repo](https://github.com/lupantech/ScienceQA) for setting up the dataset. You may either use the official ScienceQA evaluation script or [our script](eval_science_qa.py) to evaluate the model.
+#### Prepare Data
+1. Please see the ScienceQA [repo](https://github.com/lupantech/ScienceQA) for setting up the dataset.
+2. Generate the ScienceQA dataset in LLaVA conversation-style format:
+
+```Shell
+python scripts/convert_sqa_to_llava.py \
+    convert_to_llava \
+    --base-dir /path/to/ScienceQA/data/scienceqa \
+    --split {train,val,minival,test,minitest}
+```
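
For reference, `convert_to_llava` (added in `scripts/convert_sqa_to_llava.py` later in this commit) writes its output to `llava_{split}_{prompt_format}.json` — e.g. `llava_test_QCM-LEPA.json` with the default prompt format — as a list of conversation records. A minimal sketch of one record, with made-up id, image path, and text:

```Python
# Shape of one converted sample, per convert_to_llava below. The values are
# illustrative placeholders; the "image" key and the trailing "\n<image>"
# token are present only when the underlying problem has an image.
sample = {
    "id": "12345",
    "image": "12345/image.png",
    "conversations": [
        {"from": "human", "value": "Which property do these objects have in common? ...\n<image>"},
        {"from": "gpt", "value": "... The answer is B."},
    ],
}
```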
+
+#### Evaluation
+
+1. Download our pretrained LLaVA-13B (delta) weights for the ScienceQA dataset [here](https://huggingface.co/liuhaotian/LLaVA-13b-delta-v0-science_qa). Convert the delta weights to actual weights following the instructions [here](https://github.com/haotian-liu/LLaVA#llava-13b), and make sure to modify the command accordingly for ScienceQA.
+
+2. Generate LLaVA responses on the ScienceQA dataset:
+
+```Shell
+python -m llava.eval.model_vqa_science \
+    --model-name /path/to/LLaVA-13b-v0-science_qa \
+    --question-file /path/to/ScienceQA/data/scienceqa/llava_test_QCM-LEPA.json \
+    --image-folder /path/to/ScienceQA/data/scienceqa/images/test \
+    --answers-file vqa/results/ScienceQA/test_llava-13b.jsonl
+```
+
+Alternatively, you may evaluate with multiple GPUs and concatenate the generated jsonl files:
+
+```Shell
+CHUNKS=8
+CHUNK_IDX=0
+CUDA_VISIBLE_DEVICES=$CHUNK_IDX python -m llava.eval.model_vqa_science \
+    --model-name /path/to/LLaVA-13b-v0-science_qa \
+    --question-file /path/to/ScienceQA/data/scienceqa/llava_test_QCM-LEPA.json \
+    --image-folder /path/to/ScienceQA/data/scienceqa/images/test \
+    --answers-file vqa/results/ScienceQA/test_llava-13b-chunk${CHUNKS}_${CHUNK_IDX}.jsonl \
+    --num-chunks $CHUNKS \
+    --chunk-idx $CHUNK_IDX
+
+# after running this for all chunks, concatenate the results
+cat vqa/results/ScienceQA/test_llava-13b-chunk${CHUNKS}_*.jsonl > vqa/results/ScienceQA/test_llava-13b.jsonl
+```
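
The `--num-chunks`/`--chunk-idx` flags shard the question list so each GPU processes a contiguous slice. A minimal sketch of chunking consistent with those flags (the usual ceil-based split; the repo's exact helper may differ):

```Python
import math

def get_chunk(lst, num_chunks, chunk_idx):
    # Split lst into num_chunks contiguous slices and return slice chunk_idx;
    # the last slice may be shorter when the length doesn't divide evenly.
    chunk_size = math.ceil(len(lst) / num_chunks)
    return lst[chunk_idx * chunk_size : (chunk_idx + 1) * chunk_size]

# e.g. 10 questions over 3 workers -> slices of size 4, 4, 2
assert [len(get_chunk(list(range(10)), 3, i)) for i in range(3)] == [4, 4, 2]
```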
+
+3. Evaluate the generated responses (the original command passed `--result-file` twice; the second occurrence, pointing at `_result.json`, should be the result-output flag):
+
+```Shell
+python eval_science_qa.py \
+    --base-dir /path/to/ScienceQA/data/scienceqa \
+    --result-file vqa/results/ScienceQA/test_llava-13b.jsonl \
+    --output-file vqa/results/ScienceQA/test_llava-13b_output.json \
+    --output-result vqa/results/ScienceQA/test_llava-13b_result.json
+```
 
 ## Fine-tuning
 ### Data
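
For intuition about step 3: the scorer must map each generated response back to a multiple-choice letter and compare it with the ground truth in `problems.json`. A toy sketch of that comparison (not `eval_science_qa.py`'s exact logic; the "The answer is X." pattern follows the QCM-LEPA response format used above):

```Python
import re

def extract_choice(text):
    # Pull the final multiple-choice letter from a response such as
    # "... The answer is B."; returns None when no such pattern is found.
    match = re.search(r"The answer is ([A-E])", text)
    return match.group(1) if match else None

preds = {"q1": "BECAUSE ... The answer is B.", "q2": "The answer is A."}
truth = {"q1": "B", "q2": "C"}  # made-up ground truth for illustration
correct = sum(extract_choice(preds[q]) == truth[q] for q in truth)
print(f"accuracy: {correct / len(truth):.2%}")  # -> accuracy: 50.00%
```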

llava/eval/model_vqa_science.py
Lines changed: 2 additions & 2 deletions

@@ -292,11 +292,11 @@ def eval_model(args):
     parser = argparse.ArgumentParser()
     parser.add_argument("--model-name", type=str, default="facebook/opt-350m")
     parser.add_argument("--image-folder", type=str, default="")
-    parser.add_argument("--question-file", type=str, default="tables/question.jsonl")
+    parser.add_argument("--question-file", type=str, default="tables/question.json")
     parser.add_argument("--answers-file", type=str, default="answer.jsonl")
     parser.add_argument("--mm-projector", type=str, default=None)
     parser.add_argument("--vision-tower", type=str, default=None)
-    parser.add_argument("--conv-mode", type=str, default="default")
+    parser.add_argument("--conv-mode", type=str, default="simple")
     parser.add_argument("--num-chunks", type=int, default=1)
     parser.add_argument("--chunk-idx", type=int, default=0)
     parser.add_argument("--answer-prompter", action="store_true")

scripts/convert_sqa_to_llava.py
Lines changed: 88 additions & 0 deletions (new file)

@@ -0,0 +1,88 @@
import json
import os
import fire
import re

from convert_sqa_to_llava_base_prompt import build_prompt_chatbot


def convert_to_llava(base_dir, split, prompt_format="QCM-LEPA"):
    # Convert one ScienceQA split into a single JSON file of
    # LLaVA conversation-style records.
    split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[split]
    problems = json.load(open(os.path.join(base_dir, "problems.json")))

    split_problems = build_prompt_chatbot(
        problems, split_indices, prompt_format,
        use_caption=False, is_test=False)

    target_format = []
    for prob_id, (input, output) in split_problems.items():
        # Strip the "Question: " / "Answer: " prefixes if present.
        if input.startswith('Question: '):
            input = input.replace('Question: ', '')
        if output.startswith('Answer: '):
            output = output.replace('Answer: ', '')

        raw_prob_data = problems[prob_id]
        if raw_prob_data['image'] is None:
            # Text-only problem: plain two-turn conversation.
            target_format.append({
                "id": prob_id,
                "conversations": [
                    {'from': 'human', 'value': f"{input}"},
                    {'from': 'gpt', 'value': f"{output}"},
                ],
            })

        else:
            # Image problem: record the image path and append the <image> token.
            target_format.append({
                "id": prob_id,
                "image": os.path.join(prob_id, raw_prob_data['image']),
                "conversations": [
                    {'from': 'human', 'value': f"{input}\n<image>"},
                    {'from': 'gpt', 'value': f"{output}"},
                ],
            })

    print(f'Number of samples: {len(target_format)}')

    with open(os.path.join(base_dir, f"llava_{split}_{prompt_format}.json"), "w") as f:
        json.dump(target_format, f, indent=2)


def convert_to_jsonl(base_dir, split, prompt_format="QCM-LEPA"):
    # Same conversion, but written as one instruction/output record per line.
    split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[split]
    problems = json.load(open(os.path.join(base_dir, "problems.json")))

    split_problems = build_prompt_chatbot(
        problems, split_indices, prompt_format,
        use_caption=False, is_test=False)

    writer = open(os.path.join(base_dir, f"scienceqa_{split}_{prompt_format}.jsonl"), "w")
    for prob_id, (input, output) in split_problems.items():
        if input.startswith('Question: '):
            input = input.replace('Question: ', '')
        if output.startswith('Answer: '):
            output = output.replace('Answer: ', '')

        raw_prob_data = problems[prob_id]
        if raw_prob_data['image'] is None:
            data = {
                "id": prob_id,
                "instruction": f"{input}",
                "output": f"{output}",
            }

        else:
            data = {
                "id": prob_id,
                "image": os.path.join(prob_id, raw_prob_data['image']),
                "instruction": f"{input}\n<image>",
                "output": f"{output}",
            }
        writer.write(json.dumps(data) + '\n')
    writer.close()


def main(task, **kwargs):
    # The first CLI argument names the function to run ("convert_to_llava"
    # or "convert_to_jsonl"); remaining flags are passed through as kwargs.
    globals()[task](**kwargs)


if __name__ == "__main__":
    fire.Fire(main)
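
A note on the entry point: `main(task, **kwargs)` looks the task name up in `globals()`, so the first CLI argument selects which converter runs and the remaining flags become keyword arguments. A self-contained toy showing the same fire-based dispatch pattern (the `greet` function and its flag are hypothetical, not part of the repo):

```Python
import fire  # pip install fire

def greet(name="world"):
    print(f"hello, {name}")

def main(task, **kwargs):
    # Dispatch on the function name, exactly like convert_sqa_to_llava.py:
    # `python demo.py greet --name=llava` calls greet(name="llava").
    globals()[task](**kwargs)

if __name__ == "__main__":
    fire.Fire(main)
```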
