From b301c343225a9bdbf6dd620f8dbdfc8348d8ace4 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Fri, 28 Jun 2024 02:23:54 +0000
Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 .../multimodal-modeling/Qwen-VL/mm_evaluation/vqa.py | 12 ++++++------
 .../Qwen-VL/mm_evaluation/vqa_eval.py                |  2 +-
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/examples/multimodal-modeling/Qwen-VL/mm_evaluation/vqa.py b/examples/multimodal-modeling/Qwen-VL/mm_evaluation/vqa.py
index 652807d1..17a4e56f 100644
--- a/examples/multimodal-modeling/Qwen-VL/mm_evaluation/vqa.py
+++ b/examples/multimodal-modeling/Qwen-VL/mm_evaluation/vqa.py
@@ -61,8 +61,8 @@ def createIndex(self):
         for ann in self.dataset['annotations']:
             imgToQA[ann['image_id']] += [ann]
             qa[ann['question_id']] = ann
-        for ques in self.questions['questions']:
-            qqa[ques['question_id']] = ques
+        for quest in self.questions['questions']:
+            qqa[quest['question_id']] = quest
         print('index created!')
 
         # create class members
@@ -75,7 +75,7 @@ def info(self):
 
         :return:
         """
-        for key, value in self.datset['info'].items():
+        for key, value in self.dataset['info'].items():
             print('%s: %s' % (key, value))
 
     def getQuesIds(self, imgIds=[], quesTypes=[], ansTypes=[]):
@@ -162,8 +162,8 @@ def showQA(self, anns):
         for ann in anns:
             quesId = ann['question_id']
             print('Question: %s' % (self.qqa[quesId]['question']))
-            for ans in ann['answers']:
-                print('Answer %d: %s' % (ans['answer_id'], ans['answer']))
+            for answer in ann['answers']:
+                print('Answer %d: %s' % (answer['answer_id'], answer['answer']))
 
     def loadRes(self, resFile, quesFile):
         """Load result file and return a result object.
@@ -187,7 +187,7 @@ def loadRes(self, resFile, quesFile):
         annsQuesIds = [ann['question_id'] for ann in anns]
         assert set(annsQuesIds) == set(
             self.getQuesIds()
-        ), 'Results do not correspond to current VQA set. 
Either the results do not have predictions for all question ids in annotation file or there is atleast one question id that does not belong to the question ids in the annotation file.'
+        ), 'Results do not correspond to current VQA set. Either the results do not have predictions for all question ids in annotation file or there is at least one question id that does not belong to the question ids in the annotation file.'
         for ann in anns:
             quesId = ann['question_id']
             if res.dataset['task_type'] == 'Multiple Choice':
diff --git a/examples/multimodal-modeling/Qwen-VL/mm_evaluation/vqa_eval.py b/examples/multimodal-modeling/Qwen-VL/mm_evaluation/vqa_eval.py
index a44e90eb..834654da 100644
--- a/examples/multimodal-modeling/Qwen-VL/mm_evaluation/vqa_eval.py
+++ b/examples/multimodal-modeling/Qwen-VL/mm_evaluation/vqa_eval.py
@@ -216,7 +216,7 @@ def evaluate(self, quesIds=None):
             resAns = self.processPunctuation(resAns)
             resAns = self.processDigitArticle(resAns)
             gtAcc = []
-            gtAnswers = [ans['answer'] for ans in gts[quesId]['answers']]
+            gtAnswers = [answer['answer'] for answer in gts[quesId]['answers']]
             if len(set(gtAnswers)) > 1:
                 for ansDic in gts[quesId]['answers']:
                     ansDic['answer'] = self.processPunctuation(