-
Notifications
You must be signed in to change notification settings - Fork 32
Expand file tree
/
Copy pathconstants.py
More file actions
131 lines (112 loc) · 3.38 KB
/
constants.py
File metadata and controls
131 lines (112 loc) · 3.38 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
"""Common constants for evaluation framework."""
from ragas.metrics.collections import DistanceMeasure
# Pass/fail cut-off applied when a metric does not specify its own threshold.
DEFAULT_METRIC_THRESHOLD = 0.5

# --- NLP metrics: BLEU n-gram bounds ---
MIN_BLEU_NGRAM = 1
MAX_BLEU_NGRAM = 4
# Standard BLEU scores n-grams up to length 4.
DEFAULT_BLEU_MAX_NGRAM = 4

# --- NLP metrics: ROUGE variants ---
ROUGE_TYPE_ROUGE1 = "rouge1"
ROUGE_TYPE_ROUGE2 = "rouge2"
ROUGE_TYPE_ROUGEL = "rougeL"
ROUGE_TYPE_ROUGELSUM = "rougeLsum"
SUPPORTED_ROUGE_TYPES = [ROUGE_TYPE_ROUGE1, ROUGE_TYPE_ROUGE2, ROUGE_TYPE_ROUGEL, ROUGE_TYPE_ROUGELSUM]

# --- NLP metrics: ROUGE score components ---
ROUGE_MODE_PRECISION = "precision"
ROUGE_MODE_RECALL = "recall"
ROUGE_MODE_FMEASURE = "fmeasure"
SUPPORTED_ROUGE_MODES = [ROUGE_MODE_PRECISION, ROUGE_MODE_RECALL, ROUGE_MODE_FMEASURE]

# --- NLP metrics: string-similarity measure names ---
SIMILARITY_LEVENSHTEIN = "levenshtein"
SIMILARITY_HAMMING = "hamming"
SIMILARITY_JARO = "jaro"
SIMILARITY_JARO_WINKLER = "jaro_winkler"
SUPPORTED_SIMILARITY_MEASURES = [
    SIMILARITY_LEVENSHTEIN,
    SIMILARITY_HAMMING,
    SIMILARITY_JARO,
    SIMILARITY_JARO_WINKLER,
]
# Maps each supported similarity-measure name (see SUPPORTED_SIMILARITY_MEASURES)
# to the corresponding ragas DistanceMeasure enum member, so config strings can
# be translated into the value the ragas metrics API expects.
DISTANCE_MEASURE_MAP = {
    SIMILARITY_LEVENSHTEIN: DistanceMeasure.LEVENSHTEIN,
    SIMILARITY_HAMMING: DistanceMeasure.HAMMING,
    SIMILARITY_JARO: DistanceMeasure.JARO,
    SIMILARITY_JARO_WINKLER: DistanceMeasure.JARO_WINKLER,
}
# --- API client defaults ---
DEFAULT_API_BASE = "http://localhost:8080"
DEFAULT_API_VERSION = "v1"
DEFAULT_API_TIMEOUT = 300  # NOTE(review): presumably seconds — confirm with the API client
DEFAULT_API_NUM_RETRIES = 3
DEFAULT_API_CACHE_DIR = ".caches/api_cache"
SUPPORTED_ENDPOINT_TYPES = ["streaming", "query", "infer"]
DEFAULT_ENDPOINT_TYPE = "streaming"

# Evaluation frameworks that run without a judge LLM (NLP, script-based evaluations).
NON_LLM_FRAMEWORKS = frozenset({"nlp", "script"})

# --- Judge LLM defaults ---
DEFAULT_LLM_PROVIDER = "openai"
DEFAULT_LLM_MODEL = "gpt-4o-mini"
DEFAULT_LLM_TEMPERATURE = 0.0
DEFAULT_LLM_MAX_TOKENS = 512
DEFAULT_LLM_RETRIES = 5
DEFAULT_LLM_CACHE_DIR = ".caches/llm_cache"
DEFAULT_SSL_VERIFY = True
DEFAULT_SSL_CERT_FILE = None

# --- Embedding model defaults ---
DEFAULT_EMBEDDING_PROVIDER = "openai"
DEFAULT_EMBEDDING_MODEL = "text-embedding-3-small"
DEFAULT_EMBEDDING_CACHE_DIR = ".caches/embedding_cache"

# --- Result output defaults ---
DEFAULT_OUTPUT_DIR = "./eval_output"
DEFAULT_BASE_FILENAME = "evaluation"
DEFAULT_STORED_CONFIGS = ["llm", "embedding", "api"]
SUPPORTED_OUTPUT_TYPES = ["csv", "json", "txt"]
# Column order for CSV result output, grouped by purpose.
SUPPORTED_CSV_COLUMNS = (
    # Row identity
    ["conversation_group_id", "tag", "turn_id"]
    # Metric outcome
    + ["metric_identifier", "metric_metadata", "result", "score", "threshold", "reason"]
    # Query/response payload and timing
    + ["query", "response", "execution_time"]
    # Token accounting
    + ["api_input_tokens", "api_output_tokens", "judge_llm_input_tokens", "judge_llm_output_tokens"]
    # Per-judge scores (JSON array with one entry for single judge)
    + ["judge_scores"]
    # Streaming performance metrics
    + ["time_to_first_token", "streaming_duration", "tokens_per_second"]
    # Tool usage, retrieved contexts, and expected values
    + ["tool_calls", "contexts", "expected_response", "expected_intent", "expected_keywords", "expected_tool_calls"]
)
# --- Visualization ---
SUPPORTED_GRAPH_TYPES = ["pass_rates", "score_distribution", "conversation_heatmap", "status_breakdown"]
DEFAULT_VISUALIZATION_FIGSIZE = [12, 8]  # NOTE(review): looks like a [width, height] figure size — confirm with the plotting code
DEFAULT_VISUALIZATION_DPI = 300

# --- Logging ---
DEFAULT_LOG_SOURCE_LEVEL = "INFO"
DEFAULT_LOG_PACKAGE_LEVEL = "WARNING"
DEFAULT_LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
DEFAULT_LOG_SHOW_TIMESTAMPS = True

# Terminal statuses a single evaluation result may end in.
SUPPORTED_RESULT_STATUSES = [
    "PASS",
    "FAIL",
    "ERROR",
    "SKIPPED",
]