Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Update tables benchmark name to torr #1617

Merged
merged 3 commits into from
Feb 18, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
34 changes: 34 additions & 0 deletions examples/evaluate_torr.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
"""Example: evaluate a model on the TORR tables benchmark from the unitxt catalog."""
from unitxt import evaluate, load_dataset, settings
from unitxt.inference import (
    CrossProviderInferenceEngine,
)

with settings.context(
    allow_unverified_code=True,
    mock_inference_mode=True,  # no real provider calls; inference is mocked
):
    # Load the test split of the TORR benchmark from the unitxt catalog.
    test_dataset = load_dataset(
        "benchmarks.torr",
        split="test",
        use_cache=True,
    )
    # Infer
    model = CrossProviderInferenceEngine(
        model="llama-3-8b-instruct",
        max_tokens=30,
    )
    # We are using a CrossProviderInferenceEngine, an inference engine that
    # supplies API access to providers such as:
    # watsonx, bam, openai, azure, aws and more.
    #
    # For the arguments these inference engines can receive, please refer to
    # the classes' documentation, or read about the OpenAI API arguments that
    # the CrossProviderInferenceEngine follows.

    predictions = model(test_dataset)
    results = evaluate(predictions=predictions, data=test_dataset)

    print("Global scores:")
    print(results.global_scores.summary)
    print("Subsets scores:")
    print(results.subsets_scores.summary)
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,12 @@
tables_benchmark_dir = os.path.join(
constants.catalog_dir,
"recipes",
"tables_benchmark",
"torr",
)


# Recursive function to build nested benchmarks
def build_nested_benchmark(dir_path, prefix="recipes.tables_benchmark"):
def build_nested_benchmark(dir_path, prefix="recipes.torr"):
nested_scenarios = OrderedDict()

for entry in sorted(os.listdir(dir_path)):
Expand Down Expand Up @@ -44,7 +44,7 @@ def build_nested_benchmark(dir_path, prefix="recipes.tables_benchmark"):
benchmark = Benchmark(
tables_benchmark_scenarios.subsets,
__description__=(
"TablesBenchmark is an open-source benchmark developed by domain experts to evaluate various table-related tasks and capabilities.\n\n"
"Torr is an open-source benchmark developed by domain experts to evaluate various table-related tasks and capabilities.\n\n"
".. image:: https://raw.githubusercontent.com/IBM/unitxt/main/assets/catalog/tables_benchmark.png\n"
" :alt: Optional alt text\n"
" :width: 30%\n"
Expand All @@ -53,4 +53,4 @@ def build_nested_benchmark(dir_path, prefix="recipes.tables_benchmark"):
"It encompasses diverse domains and evaluates a range of capabilities, with additional tasks and domains integrated over time."
),
)
add_to_catalog(benchmark, "benchmarks.tables_benchmark", overwrite=True)
add_to_catalog(benchmark, "benchmarks.torr", overwrite=True)
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@

add_to_catalog(
DatasetRecipe(**kwargs),
f"recipes.tables_benchmark.{card}.{serializer}."
f"recipes.torr.{card}.{serializer}."
+ (",".join(augment).split("[")[0] if augment else "no")
+ f"_augmentation_{num_demos}_demos",
overwrite=True,
Expand Down

Large diffs are not rendered by default.

Loading
Loading