|
| 1 | +# Copyright The Marin Authors |
| 2 | +# SPDX-License-Identifier: Apache-2.0 |
| 3 | + |
"""
FineWeb2 multilingual data and held-out eval bundles.

The eval bundle tokenizes FineWeb2's per-language ``test`` split directly from Hugging Face parquet files. This avoids
downloading the full train split while still making held-out documents available as Levanter validation caches.
"""
| 10 | + |
| 11 | +import os.path |
| 12 | +from collections.abc import Sequence |
| 13 | +from typing import Literal |
| 14 | + |
| 15 | +from experiments.defaults import default_tokenize |
| 16 | +from experiments.llama import llama3_tokenizer |
| 17 | +from marin.execution.executor import executor_main |
| 18 | +from marin.processing.tokenize.data_configs import TokenizerStep |
| 19 | + |
# Hugging Face dataset repository for the FineWeb2 multilingual corpus.
FINEWEB2_DATASET_ID = "HuggingFaceFW/fineweb-2"
# Pinned git revision of the parquet export so file listings stay reproducible
# even if the upstream dataset is updated.
FINEWEB2_PARQUET_REVISION = "345aeeb34ec379862323beb9b5530d9e7f94522d"
# Splits published per language config in the FineWeb2 repository.
FineWeb2Split = Literal["train", "test"]
# Split names understood by the Levanter cache layer.
LevanterCacheSplit = Literal["train", "validation"]
# Source split used for the held-out eval bundle.
FINEWEB2_EVAL_SPLIT: FineWeb2Split = "test"

# Top 50 configs by total row count from the Hugging Face Dataset Viewer /size endpoint for
# HuggingFaceFW/fineweb-2 source revision af9c13333eb981300149d5ca60a8e9d659b276b9.
# NOTE: order encodes the row-count ranking — do not sort or reorder.
FINEWEB2_TOP_50_BY_ROWS = (
    "rus_Cyrl",
    "cmn_Hani",
    "deu_Latn",
    "jpn_Jpan",
    "fra_Latn",
    "ita_Latn",
    "por_Latn",
    "pol_Latn",
    "nld_Latn",
    "ind_Latn",
    "ces_Latn",
    "arb_Arab",
    "vie_Latn",
    "kor_Hang",
    "swe_Latn",
    "fas_Arab",
    "ron_Latn",
    "ukr_Cyrl",
    "hun_Latn",
    "ell_Grek",
    "dan_Latn",
    "nob_Latn",
    "fin_Latn",
    "tha_Thai",
    "slk_Latn",
    "bul_Cyrl",
    "hin_Deva",
    "bos_Latn",
    "cat_Latn",
    "ben_Beng",
    "heb_Hebr",
    "lit_Latn",
    "slv_Latn",
    "ekk_Latn",
    "zsm_Latn",
    "als_Latn",
    "lvs_Latn",
    "azj_Latn",
    "hrv_Latn",
    "tam_Taml",
    "npi_Deva",
    "urd_Arab",
    "mkd_Cyrl",
    "srp_Cyrl",
    "mar_Deva",
    "kat_Geor",
    "kaz_Cyrl",
    "mal_Mlym",
    "isl_Latn",
    "glg_Latn",
)

# Native-script South Asian/Indic configs available in FineWeb2, including every config written in an Indic script.
# Romanized variants are deliberately omitted so the supplement tracks the primary written form of each language.
FINEWEB2_INDIC_LANGUAGE_CONFIGS = (
    "anp_Deva",
    "asm_Beng",
    "awa_Deva",
    "ben_Beng",
    "bho_Deva",
    "bpy_Beng",
    "brx_Deva",
    "div_Thaa",
    "doi_Deva",
    "gom_Deva",
    "grt_Beng",
    "guj_Gujr",
    "hin_Deva",
    "hne_Deva",
    "kan_Knda",
    "kas_Arab",
    "kas_Deva",
    "kle_Deva",
    "lif_Deva",
    "mag_Deva",
    "mai_Deva",
    "mal_Mlym",
    "mar_Deva",
    "mni_Beng",
    "mni_Mtei",
    "mup_Deva",
    "new_Deva",
    "npi_Deva",
    "ory_Orya",
    "pan_Guru",
    "rav_Deva",
    "san_Deva",
    "sat_Olck",
    "sck_Deva",
    "sin_Sinh",
    "skr_Arab",
    "snd_Arab",
    "snd_Deva",
    "suz_Deva",
    "taj_Deva",
    "tam_Taml",
    "tcy_Knda",
    "tel_Telu",
    "thl_Deva",
    "urd_Arab",
    "xsr_Deva",
)

# Union of the two groups; dict.fromkeys de-duplicates while preserving first-seen order
# (several Indic configs, e.g. hin_Deva and ben_Beng, also appear in the top-50 tuple).
FINEWEB2_MULTILINGUAL_EVAL_CONFIGS = tuple(dict.fromkeys((*FINEWEB2_TOP_50_BY_ROWS, *FINEWEB2_INDIC_LANGUAGE_CONFIGS)))

# Frozen sets for O(1) membership checks when assigning subset tags.
_FINEWEB2_TOP_50_BY_ROWS_SET = frozenset(FINEWEB2_TOP_50_BY_ROWS)
_FINEWEB2_INDIC_LANGUAGE_CONFIGS_SET = frozenset(FINEWEB2_INDIC_LANGUAGE_CONFIGS)
| 136 | + |
| 137 | + |
def fineweb2_multilingual_parquet_pattern(config: str, split: FineWeb2Split) -> str:
    """Build the pinned ``hf://`` parquet glob for one FineWeb2 language config and split.

    The ``@revision`` pin keeps the file listing stable even if the upstream
    dataset repository is later updated.
    """
    repo_root = f"hf://datasets/{FINEWEB2_DATASET_ID}@{FINEWEB2_PARQUET_REVISION}"
    return f"{repo_root}/{config}/{split}/*.parquet"
| 141 | + |
| 142 | + |
def fineweb2_multilingual_tags(config: str) -> list[str]:
    """Return Levanter eval tags for aggregate multilingual, script, language, and subset metrics.

    Args:
        config: FineWeb2 config name in ``lang_Script`` form (e.g. ``"hin_Deva"``).

    Returns:
        The aggregate ``fineweb2_multilingual`` tag, per-script and per-language
        rollup tags, plus the top-50-by-rows and Indic subset tags when applicable.

    Raises:
        ValueError: If ``config`` is not in ``lang_Script`` form.
    """
    # Raise a real error instead of `assert` so validation survives `python -O`.
    if "_" not in config:
        raise ValueError(f"Expected FineWeb2 config in lang_Script form, got {config!r}")
    language, script = config.rsplit("_", maxsplit=1)
    tags = [
        "fineweb2_multilingual",
        f"fineweb2_multilingual/script/{script}",
        f"fineweb2_multilingual/language/{language}",
    ]
    if config in _FINEWEB2_TOP_50_BY_ROWS_SET:
        tags.append("fineweb2_multilingual/top_50_by_rows")
    if config in _FINEWEB2_INDIC_LANGUAGE_CONFIGS_SET:
        tags.append("fineweb2_multilingual/indic")
    return tags
| 157 | + |
| 158 | + |
def fineweb2_multilingual_tokenized(
    *,
    split: FineWeb2Split,
    configs: Sequence[str] = FINEWEB2_MULTILINGUAL_EVAL_CONFIGS,
    cache_split: LevanterCacheSplit = "train",
    name_prefix: str | None = None,
    tokenizer: str = llama3_tokenizer,
) -> dict[str, TokenizerStep]:
    """Return tokenization steps for selected FineWeb2 multilingual configs and split.

    Each config becomes one `default_tokenize` step keyed by its output name
    (``<name_prefix>/<config>``); caches are marked as validation caches when
    ``cache_split == "validation"``.
    """
    prefix = os.path.join("fineweb2_multilingual", split) if name_prefix is None else name_prefix
    as_validation = cache_split == "validation"

    def _named_step(config: str) -> tuple[str, TokenizerStep]:
        # One tokenization step per language config, read from the pinned parquet files.
        step_name = os.path.join(prefix, config)
        step = default_tokenize(
            name=step_name,
            dataset=fineweb2_multilingual_parquet_pattern(config, split),
            tokenizer=tokenizer,
            is_validation=as_validation,
            tags=fineweb2_multilingual_tags(config),
        )
        return step_name, step

    return dict(_named_step(config) for config in configs)
| 181 | + |
| 182 | + |
def fineweb2_multilingual_eval_bundle(*, tokenizer: str = llama3_tokenizer) -> dict[str, TokenizerStep]:
    """Return the opt-in tokenization bundle for selected FineWeb2 multilingual held-out eval configs."""
    # Held-out documents are cached as Levanter validation caches under a dedicated prefix.
    bundle = fineweb2_multilingual_tokenized(
        split=FINEWEB2_EVAL_SPLIT,
        tokenizer=tokenizer,
        cache_split="validation",
        name_prefix="fineweb2_multilingual_eval",
    )
    return bundle
| 191 | + |
| 192 | + |
if __name__ == "__main__":
    # Running the module directly tokenizes the held-out eval configs.
    eval_steps = fineweb2_multilingual_eval_bundle()
    executor_main(
        steps=list(eval_steps.values()),
        description="Tokenize FineWeb2 multilingual held-out eval sets",
    )
0 commit comments