from unitxt.blocks import LoadHF, Set, TaskCard
from unitxt.catalog import add_to_catalog, get_from_catalog
from unitxt.image_operators import ToImage
from unitxt.splitters import RenameSplits
from unitxt.templates import MultiReferenceTemplate
from unitxt.test_utils.card import test_card

# Reuse the catalog's generic QA-with-context templates and define the
# lmms-eval default prompt as an additional template.
templates = get_from_catalog("templates.qa.with_context.all")
template = MultiReferenceTemplate(
    input_format="{context}\n{question}\nAnswer the question using a single word or phrase.",
    references_field="answers",
    __description__="lmms-evals default template for docvqa.",
)

card = TaskCard(
    loader=LoadHF(
        path="lmms-lab/DocVQA",
        name="DocVQA",
        data_classification_policy=["public"],
        splits=["train", "test", "validation"],
    ),
    preprocess_steps=[
        # Evaluate on the validation split by exposing it as "test".
        RenameSplits(mapper={"validation": "test"}),
        # Decode the raw image field and use it as the question's context.
        ToImage(field="image", to_field="context"),
        Set(fields={"context_type": "image"}),
    ],
    # Abstractive QA over a visual context, scored with ANLS (the standard DocVQA metric).
    task="tasks.qa.with_context.abstractive[metrics=[metrics.anls]]",
    templates=[template, *templates.items],
    default_template=template,
    __tags__={
        "license": "apache-2.0",
        "multilinguality": "monolingual",
        "modalities": ["image", "text"],
        "size_categories": "10K<n<100K",
        "task_categories": "question-answering",
        "task_ids": "extractive-qa",
    },
    __description__=(
        "The doc-vqa dataset integrates images from the Infographic_vqa dataset sourced from HuggingFaceM4's The Cauldron dataset, as well as images from the AFTDB (Arxiv Figure Table Database) dataset curated by cmarkea. It consists of pairs of images and corresponding text, with each image linked to an average of five questions and answers available in both English and French. These questions and answers were generated using Gemini 1.5 Pro, making the dataset well suited for multimodal tasks involving image-text pairing and multilingual question answering."
    ),
)

# Sanity-check the card end to end, then register it in the local catalog.
test_card(card)
add_to_catalog(card, "cards.doc_vqa.lmms_eval", overwrite=True)
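
Once registered, the card can be consumed through unitxt's standard API. The following is a minimal usage sketch, assuming the card has been added to your local catalog by the script above and that your installed unitxt version supports the recipe-string form of load_dataset; the template_card_index and loader_limit arguments are illustrative choices, not part of the card itself.

from unitxt import load_dataset

# Build a small processed dataset from the card, rendering each instance with
# the first template in the card's template list (the lmms-eval default prompt).
dataset = load_dataset(
    "card=cards.doc_vqa.lmms_eval,template_card_index=0,loader_limit=100"
)

# Each processed instance carries a rendered "source" prompt and reference answers.
print(dataset["test"][0]["source"])
print(dataset["test"][0]["references"])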