
Use faster model for examples
Signed-off-by: elronbandel <[email protected]>
elronbandel committed Feb 13, 2025
1 parent 8472838 commit 70f75ca
Showing 6 changed files with 62 additions and 27 deletions.
4 changes: 2 additions & 2 deletions examples/evaluate_using_metrics_ensemble.py
@@ -27,9 +27,9 @@
split="test",
)

- # Infer using Llama-3.2-1B base using HF API
+ # Infer using SmolLM2 using HF API
model = HFPipelineBasedInferenceEngine(
-     model_name="Qwen/Qwen1.5-0.5B-Chat", max_new_tokens=32
+     model_name="HuggingFaceTB/SmolLM2-135M-Instruct", max_new_tokens=32
)
# Change to this to infer with external APIs:
# CrossProviderInferenceEngine(model="llama-3-2-1b-instruct", provider="watsonx")
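For orientation, the changed snippet slots into the usual flow of these example scripts roughly as follows. This is a minimal sketch, not the actual file contents: the engine construction, format="formats.chat_api", split="test", and predictions = model(dataset) all appear in the diffs on this page, while load_dataset, evaluate, and the card/template names are illustrative assumptions.

# Minimal sketch (assumes unitxt is installed; card and template names are placeholders).
from unitxt import evaluate, load_dataset
from unitxt.inference import HFPipelineBasedInferenceEngine

dataset = load_dataset(
    card="cards.squad",  # hypothetical card, for illustration only
    template="templates.qa.with_context.simple",  # hypothetical template
    format="formats.chat_api",
    split="test",
)

# Small instruct model so the example runs quickly on CPU.
model = HFPipelineBasedInferenceEngine(
    model_name="HuggingFaceTB/SmolLM2-135M-Instruct", max_new_tokens=32
)

predictions = model(dataset)
results = evaluate(predictions=predictions, data=dataset)  # assumed evaluation entry point
print(results)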
4 changes: 2 additions & 2 deletions examples/ner_evaluation.py
@@ -35,9 +35,9 @@
format="formats.chat_api",
)

- # Infer using Llama-3.2-1B base using HF API
+ # Infer using SmolLM2 using HF API
# model = HFPipelineBasedInferenceEngine(
-     # model_name="Qwen/Qwen1.5-0.5B-Chat", max_new_tokens=32
+     # model_name="HuggingFaceTB/SmolLM2-135M-Instruct", max_new_tokens=32
# )
# Change to this to infer with external APIs:

4 changes: 2 additions & 2 deletions examples/qa_evaluation.py
@@ -30,9 +30,9 @@
format="formats.chat_api",
)

- # Infer using Llama-3.2-1B base using HF API
+ # Infer using SmolLM2 using HF API
model = HFPipelineBasedInferenceEngine(
-     model_name="Qwen/Qwen1.5-0.5B-Chat", max_new_tokens=32
+     model_name="HuggingFaceTB/SmolLM2-135M-Instruct", max_new_tokens=32
)
# Change to this to infer with external APIs:
# from unitxt.inference import CrossProviderInferenceEngine
4 changes: 2 additions & 2 deletions examples/standalone_evaluation_llm_as_judge.py
@@ -89,9 +89,9 @@
max_test_instances=10,
)

- # Infer using Llama-3.2-1B base using HF API
+ # Infer using SmolLM2 using HF API
model = HFPipelineBasedInferenceEngine(
-     model_name="Qwen/Qwen1.5-0.5B-Chat", max_new_tokens=32
+     model_name="HuggingFaceTB/SmolLM2-135M-Instruct", max_new_tokens=32
)
predictions = model(dataset)

4 changes: 2 additions & 2 deletions examples/standalone_qa_evaluation.py
@@ -37,9 +37,9 @@
)


- # Infer using Llama-3.2-1B base using HF API
+ # Infer using SmolLM2 using HF API
model = HFPipelineBasedInferenceEngine(
-     model_name="Qwen/Qwen1.5-0.5B-Chat", max_new_tokens=32
+     model_name="HuggingFaceTB/SmolLM2-135M-Instruct", max_new_tokens=32
)
# Change to this to infer with external APIs:
# from unitxt.inference import CrossProviderInferenceEngine
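The commented-out alternative mentioned in each example swaps the local HF pipeline for a hosted provider. A short sketch of that path, using only the model id and provider value from the comments above; having watsonx (or another provider's) credentials configured in the environment is assumed:

# Sketch of the external-API path referenced in the example comments (credentials assumed).
from unitxt.inference import CrossProviderInferenceEngine

model = CrossProviderInferenceEngine(
    model="llama-3-2-1b-instruct", provider="watsonx"
)

# The rest of the flow is unchanged: the engine is called on the prepared dataset.
predictions = model(dataset)  # `dataset` built with load_dataset(...) as in the sketch above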
69 changes: 52 additions & 17 deletions utils/.secrets.baseline
@@ -90,10 +90,6 @@
{
"path": "detect_secrets.filters.allowlist.is_line_allowlisted"
},
- {
- "path": "detect_secrets.filters.common.is_baseline_file",
- "filename": "utils/.secrets.baseline"
- },
{
"path": "detect_secrets.filters.common.is_ignored_due_to_verification_policies",
"min_level": 2
@@ -127,22 +123,65 @@
}
],
"results": {
+ "examples/evaluate_using_metrics_ensemble.py": [
+ {
+ "type": "Base64 High Entropy String",
+ "filename": "examples/evaluate_using_metrics_ensemble.py",
+ "hashed_secret": "bed3655d44736098fa59a0661d724a73da7c5654",
+ "is_verified": false,
+ "line_number": 32
+ }
+ ],
+ "examples/ner_evaluation.py": [
+ {
+ "type": "Base64 High Entropy String",
+ "filename": "examples/ner_evaluation.py",
+ "hashed_secret": "bed3655d44736098fa59a0661d724a73da7c5654",
+ "is_verified": false,
+ "line_number": 40
+ }
+ ],
+ "examples/qa_evaluation.py": [
+ {
+ "type": "Base64 High Entropy String",
+ "filename": "examples/qa_evaluation.py",
+ "hashed_secret": "bed3655d44736098fa59a0661d724a73da7c5654",
+ "is_verified": false,
+ "line_number": 35
+ }
+ ],
+ "examples/standalone_evaluation_llm_as_judge.py": [
+ {
+ "type": "Base64 High Entropy String",
+ "filename": "examples/standalone_evaluation_llm_as_judge.py",
+ "hashed_secret": "bed3655d44736098fa59a0661d724a73da7c5654",
+ "is_verified": false,
+ "line_number": 94
+ }
+ ],
+ "examples/standalone_qa_evaluation.py": [
+ {
+ "type": "Base64 High Entropy String",
+ "filename": "examples/standalone_qa_evaluation.py",
+ "hashed_secret": "bed3655d44736098fa59a0661d724a73da7c5654",
+ "is_verified": false,
+ "line_number": 42
+ }
+ ],
"src/unitxt/inference.py": [
{
"type": "Secret Keyword",
"filename": "src/unitxt/inference.py",
"hashed_secret": "aa6cd2a77de22303be80e1f632195d62d211a729",
"is_verified": false,
- "line_number": 1294,
- "is_secret": false
+ "line_number": 1294
},
{
"type": "Secret Keyword",
"filename": "src/unitxt/inference.py",
"hashed_secret": "c8f16a194efc59559549c7bd69f7bea038742e79",
"is_verified": false,
- "line_number": 1779,
- "is_secret": false
+ "line_number": 1779
}
],
"src/unitxt/loaders.py": [
@@ -151,8 +190,7 @@
"filename": "src/unitxt/loaders.py",
"hashed_secret": "840268f77a57d5553add023cfa8a4d1535f49742",
"is_verified": false,
- "line_number": 595,
- "is_secret": false
+ "line_number": 595
}
],
"src/unitxt/metrics.py": [
@@ -161,8 +199,7 @@
"filename": "src/unitxt/metrics.py",
"hashed_secret": "fa172616e9af3d2a24b5597f264eab963fe76889",
"is_verified": false,
- "line_number": 70,
- "is_secret": false
+ "line_number": 70
}
],
"tests/library/test_loaders.py": [
@@ -171,18 +208,16 @@
"filename": "tests/library/test_loaders.py",
"hashed_secret": "8d814baafe5d8412572dc520dcab83f60ce1375c",
"is_verified": false,
- "line_number": 125,
- "is_secret": false
+ "line_number": 125
},
{
"type": "Secret Keyword",
"filename": "tests/library/test_loaders.py",
"hashed_secret": "42a472ac88cd8d43a2c5ae0bd0bdf4626cdaba31",
"is_verified": false,
- "line_number": 135,
- "is_secret": false
+ "line_number": 135
}
]
},
- "generated_at": "2025-02-12T09:37:42Z"
+ "generated_at": "2025-02-13T13:37:19Z"
}
