Merge branch 'enable_multimodal_model' of https://github.com/intel/auto-round into enable_multimodal_model
WeiweiZhang1 committed Jul 1, 2024
2 parents 0bddcb8 + b301c34 commit 1acee25
Showing 2 changed files with 7 additions and 7 deletions.
12 changes: 6 additions & 6 deletions examples/multimodal-modeling/Qwen-VL/mm_evaluation/vqa.py
@@ -61,8 +61,8 @@ def createIndex(self):
         for ann in self.dataset['annotations']:
             imgToQA[ann['image_id']] += [ann]
             qa[ann['question_id']] = ann
-        for ques in self.questions['questions']:
-            qqa[ques['question_id']] = ques
+        for quest in self.questions['questions']:
+            qqa[quest['question_id']] = quest
         print('index created!')

         # create class members
@@ -75,7 +75,7 @@ def info(self):
         :return:
         """
-        for key, value in self.datset['info'].items():
+        for key, value in self.dataset['info'].items():
             print('%s: %s' % (key, value))

     def getQuesIds(self, imgIds=[], quesTypes=[], ansTypes=[]):
@@ -162,8 +162,8 @@ def showQA(self, anns):
         for ann in anns:
             quesId = ann['question_id']
             print('Question: %s' % (self.qqa[quesId]['question']))
-            for ans in ann['answers']:
-                print('Answer %d: %s' % (ans['answer_id'], ans['answer']))
+            for answer in ann['answers']:
+                print('Answer %d: %s' % (answer['answer_id'], answer['answer']))

     def loadRes(self, resFile, quesFile):
         """Load result file and return a result object.
@@ -187,7 +187,7 @@ def loadRes(self, resFile, quesFile):
         annsQuesIds = [ann['question_id'] for ann in anns]
         assert set(annsQuesIds) == set(
             self.getQuesIds()
-        ), 'Results do not correspond to current VQA set. Either the results do not have predictions for all question ids in annotation file or there is atleast one question id that does not belong to the question ids in the annotation file.'
+        ), 'Results do not correspond to current VQA set. Either the results do not have predictions for all question ids in annotation file or there is at least one question id that does not belong to the question ids in the annotation file.'
         for ann in anns:
             quesId = ann['question_id']
             if res.dataset['task_type'] == 'Multiple Choice':
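For context on the assertion corrected above: loadRes() reads a separate results file and checks that it covers every question id in the annotation file. A minimal sketch of such a results file, assuming the standard VQA-toolkit result format; the field values below are illustrative, not taken from this commit:

# Illustrative sketch only: the per-question prediction list that loadRes()
# consumes, following the standard VQA-toolkit convention. The question_id
# values are placeholders.
import json

results = [
    {"question_id": 1, "answer": "down"},
    {"question_id": 2, "answer": "two"},
]
# The set of question ids here must match the annotation file's question ids,
# or the assert in loadRes() fires.
with open("results.json", "w") as f:
    json.dump(results, f)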
2 changes: 1 addition & 1 deletion (second changed file)
@@ -216,7 +216,7 @@ def evaluate(self, quesIds=None):
             resAns = self.processPunctuation(resAns)
             resAns = self.processDigitArticle(resAns)
             gtAcc = []
-            gtAnswers = [ans['answer'] for ans in gts[quesId]['answers']]
+            gtAnswers = [answer['answer'] for answer in gts[quesId]['answers']]
             if len(set(gtAnswers)) > 1:
                 for ansDic in gts[quesId]['answers']:
                     ansDic['answer'] = self.processPunctuation(
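Taken together, the methods touched by this commit follow the usual VQA-toolkit evaluation flow: index the annotations and questions, load predictions, then normalize and score answers in evaluate(). A rough usage sketch under that assumption; the class names, module names, and file paths below are guesses based on the standard toolkit, not confirmed by this commit:

# Hypothetical driver for the evaluation helpers shown in the diff above.
# Module/class names and file paths are assumptions.
from vqa import VQA            # createIndex, showQA, loadRes live here (vqa.py)
from vqa_eval import VQAEval   # assumed home of evaluate()

vqa = VQA("annotations.json", "questions.json")         # createIndex() builds the qa/qqa maps
vqa_res = vqa.loadRes("results.json", "questions.json")

evaluator = VQAEval(vqa, vqa_res, n=2)                  # n = decimal places for reported accuracy
evaluator.evaluate()                                    # punctuation/digit-article normalization, then scoring
print("overall accuracy: %.2f" % evaluator.accuracy["overall"])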
