-
Notifications
You must be signed in to change notification settings - Fork 104
Expand file tree
/
Copy pathValidateCommand.cs
More file actions
755 lines (655 loc) · 34.7 KB
/
ValidateCommand.cs
File metadata and controls
755 lines (655 loc) · 34.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
using System.CommandLine;
using SkillValidator.Models;
using SkillValidator.Services;
using SkillValidator.Utilities;
using GitHub.Copilot.SDK;
namespace SkillValidator.Commands;
public static class ValidateCommand
{
/// <summary>
/// Builds the root CLI command: declares every argument/option, registers them
/// on the <see cref="RootCommand"/>, and wires parsed values into a
/// <see cref="ValidatorConfig"/> before delegating to <see cref="Run"/>.
/// </summary>
public static RootCommand Create()
{
    // Single source of truth for the default model. Previously this literal was
    // repeated in three places (option default, Model fallback, JudgeModel
    // fallback) and could silently drift apart.
    const string DefaultModel = "claude-opus-4.6";
    var pathsArg = new Argument<string[]>("paths") { Description = "Paths to skill directories or parent directories", Arity = ArgumentArity.OneOrMore };
    var minImprovementOpt = new Option<double>("--min-improvement") { Description = "Minimum improvement score to pass (0-1)", DefaultValueFactory = _ => 0.1 };
    var requireCompletionOpt = new Option<bool>("--require-completion") { Description = "Fail if skill regresses task completion", DefaultValueFactory = _ => true };
    var requireEvalsOpt = new Option<bool>("--require-evals") { Description = "Fail if skill has no tests/eval.yaml" };
    var verdictWarnOnlyOpt = new Option<bool>("--verdict-warn-only") { Description = "Treat verdict failures as warnings (exit 0). Execution errors, --require-evals, and spec conformance violations still fail." };
    var verboseOpt = new Option<bool>("--verbose") { Description = "Show detailed per-scenario breakdowns" };
    var modelOpt = new Option<string>("--model") { Description = "Model to use for agent runs", DefaultValueFactory = _ => DefaultModel };
    var judgeModelOpt = new Option<string?>("--judge-model") { Description = "Model to use for judging (defaults to --model)" };
    var judgeModeOpt = new Option<string>("--judge-mode") { Description = "Judge mode: pairwise, independent, or both", DefaultValueFactory = _ => "pairwise" };
    var runsOpt = new Option<int>("--runs") { Description = "Number of runs per scenario for averaging", DefaultValueFactory = _ => 5 };
    var parallelSkillsOpt = new Option<int>("--parallel-skills") { Description = "Max concurrent skills to evaluate", DefaultValueFactory = _ => 1 };
    var parallelScenariosOpt = new Option<int>("--parallel-scenarios") { Description = "Max concurrent scenarios per skill", DefaultValueFactory = _ => 1 };
    var parallelRunsOpt = new Option<int>("--parallel-runs") { Description = "Max concurrent runs per scenario", DefaultValueFactory = _ => 1 };
    var judgeTimeoutOpt = new Option<int>("--judge-timeout") { Description = "Judge timeout in seconds", DefaultValueFactory = _ => 300 };
    var confidenceLevelOpt = new Option<double>("--confidence-level") { Description = "Confidence level for statistical intervals (0-1)", DefaultValueFactory = _ => 0.95 };
    var resultsDirOpt = new Option<string>("--results-dir") { Description = "Directory to save results to", DefaultValueFactory = _ => ".skill-validator-results" };
    var testsDirOpt = new Option<string?>("--tests-dir") { Description = "Directory containing test subdirectories" };
    var reporterOpt = new Option<string[]>("--reporter") { Description = "Reporter (console, json, junit, markdown). Can be repeated.", AllowMultipleArgumentsPerToken = true };
    var noOverfittingCheckOpt = new Option<bool>("--no-overfitting-check") { Description = "Disable LLM-based overfitting analysis (on by default)" };
    var overfittingFixOpt = new Option<bool>("--overfitting-fix") { Description = "Generate a fixed eval.yaml with improved rubric items/assertions" };
    var selectivityTestOpt = new Option<bool>("--selectivity-test") { Description = "Run selectivity test using should_activate / should_not_activate prompts from eval.yaml" };
    var selectivityMinRecallOpt = new Option<double>("--selectivity-min-recall") { Description = "Minimum recall (activation on should_activate prompts) to pass (0-1)", DefaultValueFactory = _ => 0.8 };
    var selectivityMinPrecisionOpt = new Option<double>("--selectivity-min-precision") { Description = "Minimum precision (non-activation on should_not_activate prompts) to pass (0-1)", DefaultValueFactory = _ => 0.8 };
    var command = new RootCommand("Validate that agent skills meaningfully improve agent performance")
    {
        pathsArg,
        minImprovementOpt,
        requireCompletionOpt,
        requireEvalsOpt,
        verdictWarnOnlyOpt,
        verboseOpt,
        modelOpt,
        judgeModelOpt,
        judgeModeOpt,
        runsOpt,
        parallelSkillsOpt,
        parallelScenariosOpt,
        parallelRunsOpt,
        judgeTimeoutOpt,
        confidenceLevelOpt,
        resultsDirOpt,
        testsDirOpt,
        reporterOpt,
        noOverfittingCheckOpt,
        overfittingFixOpt,
        selectivityTestOpt,
        selectivityMinRecallOpt,
        selectivityMinPrecisionOpt,
    };
    command.SetAction(async (parseResult, _) =>
    {
        var paths = parseResult.GetValue(pathsArg) ?? [];
        var reporterValues = parseResult.GetValue(reporterOpt) ?? [];
        // No --reporter given: default to console + json + markdown output.
        var reporters = reporterValues.Length > 0
            ? reporterValues.Select(ParseReporter).ToList()
            : new List<ReporterSpec>
            {
                new(ReporterType.Console),
                new(ReporterType.Json),
                new(ReporterType.Markdown),
            };
        // Any unrecognized value falls back to the pairwise default.
        var judgeMode = parseResult.GetValue(judgeModeOpt) switch
        {
            "independent" => JudgeMode.Independent,
            "both" => JudgeMode.Both,
            _ => JudgeMode.Pairwise,
        };
        var config = new ValidatorConfig
        {
            MinImprovement = parseResult.GetValue(minImprovementOpt),
            RequireCompletion = parseResult.GetValue(requireCompletionOpt),
            RequireEvals = parseResult.GetValue(requireEvalsOpt),
            Verbose = parseResult.GetValue(verboseOpt),
            Model = parseResult.GetValue(modelOpt) ?? DefaultModel,
            // Judge model falls back to the run model, then to the default.
            JudgeModel = parseResult.GetValue(judgeModelOpt) ?? parseResult.GetValue(modelOpt) ?? DefaultModel,
            JudgeMode = judgeMode,
            // Clamp all counts to at least 1 so a bad CLI value can't hang or divide by zero.
            Runs = Math.Max(1, parseResult.GetValue(runsOpt)),
            ParallelSkills = Math.Max(1, parseResult.GetValue(parallelSkillsOpt)),
            ParallelScenarios = Math.Max(1, parseResult.GetValue(parallelScenariosOpt)),
            ParallelRuns = Math.Max(1, parseResult.GetValue(parallelRunsOpt)),
            // CLI takes seconds; config stores milliseconds.
            JudgeTimeout = parseResult.GetValue(judgeTimeoutOpt) * 1000,
            ConfidenceLevel = parseResult.GetValue(confidenceLevelOpt),
            VerdictWarnOnly = parseResult.GetValue(verdictWarnOnlyOpt),
            Reporters = reporters,
            SkillPaths = paths,
            ResultsDir = parseResult.GetValue(resultsDirOpt),
            TestsDir = parseResult.GetValue(testsDirOpt),
            OverfittingCheck = !parseResult.GetValue(noOverfittingCheckOpt),
            OverfittingFix = parseResult.GetValue(overfittingFixOpt),
            SelectivityTest = parseResult.GetValue(selectivityTestOpt),
            SelectivityMinRecall = parseResult.GetValue(selectivityMinRecallOpt),
            SelectivityMinPrecision = parseResult.GetValue(selectivityMinPrecisionOpt),
        };
        return await Run(config);
    });
    return command;
}
/// <summary>
/// Maps a --reporter CLI value to its <see cref="ReporterSpec"/>.
/// Throws <see cref="ArgumentException"/> for unrecognized values.
/// </summary>
private static ReporterSpec ParseReporter(string value)
{
    switch (value)
    {
        case "console":
            return new ReporterSpec(ReporterType.Console);
        case "json":
            return new ReporterSpec(ReporterType.Json);
        case "junit":
            return new ReporterSpec(ReporterType.Junit);
        case "markdown":
            return new ReporterSpec(ReporterType.Markdown);
        default:
            throw new ArgumentException($"Unknown reporter type: {value}");
    }
}
/// <summary>
/// Runs the full validation pipeline for the given config: validates the
/// configured model(s) against the service, discovers skills under the given
/// paths, evaluates each skill (with bounded concurrency), reports results,
/// and returns a process exit code (0 = pass, 1 = fail).
/// </summary>
public static async Task<int> Run(ValidatorConfig config)
{
    // Validate model early so a typo'd --model fails fast instead of mid-evaluation.
    try
    {
        var client = await AgentRunner.GetSharedClient(config.Verbose);
        var models = await client.ListModelsAsync();
        var modelIds = models.Select(m => m.Id).ToList();
        // Check the judge model too, but only when it differs from the run model.
        var modelsToValidate = new List<string> { config.Model };
        if (config.JudgeModel != config.Model) modelsToValidate.Add(config.JudgeModel);
        foreach (var m in modelsToValidate)
        {
            if (!modelIds.Contains(m))
            {
                Console.Error.WriteLine($"Invalid model: \"{m}\"\nAvailable models: {string.Join(", ", modelIds)}");
                return 1;
            }
        }
        Console.WriteLine($"Using model: {config.Model}" +
            (config.JudgeModel != config.Model ? $", judge: {config.JudgeModel}" : "") +
            $", judge-mode: {config.JudgeMode}");
    }
    catch (Exception error)
    {
        // Any failure talking to the model service (auth, network, etc.) is fatal.
        Console.Error.WriteLine($"Failed to validate model: {error}");
        return 1;
    }
    if (config.Verbose)
        Console.WriteLine($"Results dir: {config.ResultsDir}");
    // Discover skills under every requested path.
    var allSkills = new List<SkillInfo>();
    foreach (var path in config.SkillPaths)
    {
        var skills = await SkillDiscovery.DiscoverSkills(path, config.TestsDir);
        allSkills.AddRange(skills);
    }
    if (allSkills.Count == 0)
    {
        Console.Error.WriteLine("No skills found in the specified paths.");
        return 1;
    }
    Console.WriteLine($"Found {allSkills.Count} skill(s)\n");
    // Check per-plugin aggregate description size before spending any model calls.
    var aggregateFailures = CheckAggregateDescriptionLimits(allSkills);
    if (aggregateFailures.Count > 0)
    {
        foreach (var failure in aggregateFailures)
            Console.Error.WriteLine($"\x1b[31m❌ {failure}\x1b[0m");
        return 1;
    }
    if (config.Runs < 5)
        Console.WriteLine($"\x1b[33m⚠ Running with {config.Runs} run(s). For statistically significant results, use --runs 5 or higher.\x1b[0m");
    bool usePairwise = config.JudgeMode is JudgeMode.Pairwise or JudgeMode.Both;
    using var spinner = new Spinner();
    using var skillLimit = new ConcurrencyLimiter(config.ParallelSkills);
    // Evaluate skills concurrently, bounded by --parallel-skills.
    spinner.Start($"Evaluating {allSkills.Count} skill(s)...");
    var skillTasks = allSkills.Select(skill =>
        skillLimit.RunAsync(() => EvaluateSkill(skill, config, usePairwise, spinner)));
    // Settle every task rather than letting the first exception abort Task.WhenAll:
    // each element carries either the verdict or the exception that replaced it.
    var settled = await Task.WhenAll(skillTasks.Select(async t =>
    {
        try { return (Result: await t, Error: (Exception?)null); }
        catch (Exception ex) { return (Result: (SkillVerdict?)null, Error: ex); }
    }));
    spinner.Stop();
    var verdicts = new List<SkillVerdict>();
    bool hasRejections = false;
    foreach (var (result, error) in settled)
    {
        // A null result with a null error means the skill was skipped (no evals).
        if (result is not null)
        {
            verdicts.Add(result);
        }
        else if (error is not null)
        {
            hasRejections = true;
            Console.Error.WriteLine($"\x1b[31m❌ Skill evaluation failed: {error.Message}\x1b[0m");
        }
    }
    await Reporter.ReportResults(verdicts, config.Reporters, config.Verbose,
        config.Model, config.JudgeModel, config.ResultsDir);
    await AgentRunner.StopSharedClient();
    await AgentRunner.CleanupWorkDirs();
    // Always fail on execution errors, even in --verdict-warn-only mode
    if (hasRejections) return 1;
    var allPassed = verdicts.All(v => v.Passed);
    if (config.VerdictWarnOnly && !allPassed)
    {
        // In --verdict-warn-only mode, suppress verdict failures except missing_eval
        // (which is controlled by --require-evals and should remain fatal) and
        // spec_conformance_failure (structural violation that must always block).
        var onlyWarnableFailures = verdicts.All(
            v => v.Passed || (v.FailureKind != "missing_eval" && v.FailureKind != "spec_conformance_failure"));
        if (onlyWarnableFailures) return 0;
    }
    return allPassed ? 0 : 1;
}
/// <summary>
/// Groups skills by plugin (derived from path) and checks that the aggregate
/// description length per plugin does not exceed the limit. Returns one
/// human-readable failure message per offending plugin (empty list = all ok).
/// </summary>
internal static List<string> CheckAggregateDescriptionLimits(IReadOnlyList<SkillInfo> skills)
{
    int limit = SkillProfiler.MaxAggregateDescriptionLength;
    var failures = new List<string>();

    // Convention is plugins/{plugin}/skills/{skill}/ — skills whose plugin
    // cannot be derived (null key) are exempt from the aggregate check.
    foreach (var pluginGroup in skills.GroupBy(s => DerivePluginName(s.Path)))
    {
        if (pluginGroup.Key is null)
            continue;

        int aggregateLength = pluginGroup.Sum(s => s.Description.Length);
        if (aggregateLength <= limit)
            continue;

        failures.Add(
            $"Plugin '{pluginGroup.Key}' aggregate description size is {aggregateLength:N0} characters — " +
            $"maximum is {limit:N0}.");
    }

    return failures;
}
/// <summary>
/// Derives the plugin name from a skill path by walking up to find the
/// "skills" directory (case-insensitive) and returning its parent directory
/// name. e.g. "plugins/dotnet-msbuild/skills/build-perf" → "dotnet-msbuild".
/// Returns null when no "skills" ancestor with a parent exists.
/// </summary>
internal static string? DerivePluginName(string skillPath)
{
    // Walk from the (normalized) skill directory through every ancestor.
    for (var current = new DirectoryInfo(Path.GetFullPath(skillPath));
         current is not null;
         current = current.Parent)
    {
        // The plugin is the directory that directly contains the "skills" folder.
        bool isSkillsDir = string.Equals(current.Name, "skills", StringComparison.OrdinalIgnoreCase);
        if (isSkillsDir && current.Parent is not null)
            return current.Parent.Name;
    }

    return null;
}
/// <summary>
/// Evaluates one skill: profiles it for spec conformance, optionally runs the
/// standalone selectivity test, executes every scenario (with bounded
/// concurrency) while an overfitting analysis runs in parallel, and computes
/// the final verdict. Returns null when the skill is skipped (no eval.yaml
/// without --require-evals, no scenarios, or no selectivity prompts in
/// selectivity-only mode).
/// </summary>
private static async Task<SkillVerdict?> EvaluateSkill(
    SkillInfo skill,
    ValidatorConfig config,
    bool usePairwise,
    Spinner spinner)
{
    var prefix = $"[{skill.Name}]";
    var log = (string msg) => spinner.Log($"{prefix} {msg}");
    if (skill.EvalConfig is null)
    {
        // No tests/eval.yaml: fatal only under --require-evals, otherwise skip.
        if (config.RequireEvals)
        {
            return new SkillVerdict
            {
                SkillName = skill.Name,
                SkillPath = skill.Path,
                Passed = false,
                Scenarios = [],
                OverallImprovementScore = 0,
                Reason = "No tests/eval.yaml found (required by --require-evals)",
                FailureKind = "missing_eval",
            };
        }
        log("⏭ Skipping (no tests/eval.yaml)");
        return null;
    }
    if (skill.EvalConfig.Scenarios.Count == 0)
    {
        log("⏭ Skipping (eval.yaml has no scenarios)");
        return null;
    }
    log("🔍 Evaluating...");
    // Static profile: structural analysis of the skill (errors are spec violations).
    var profile = SkillProfiler.AnalyzeSkill(skill);
    log($"📊 {SkillProfiler.FormatProfileLine(profile)}");
    foreach (var error in profile.Errors)
        log($" ❌ {error}");
    foreach (var warning in SkillProfiler.FormatProfileWarnings(profile))
        log(warning);
    if (profile.Errors.Count > 0)
    {
        // Spec conformance failures block immediately — no scenarios are run.
        return new SkillVerdict
        {
            SkillName = skill.Name,
            SkillPath = skill.Path,
            Passed = false,
            Scenarios = [],
            OverallImprovementScore = 0,
            Reason = string.Join(" ", profile.Errors),
            FailureKind = "spec_conformance_failure",
        };
    }
    // Selectivity-only mode: skip full evaluation, just probe skill activation
    if (config.SelectivityTest)
    {
        if (skill.EvalConfig is not null
            && (skill.EvalConfig.ShouldActivatePrompts is { Count: > 0 } || skill.EvalConfig.ShouldNotActivatePrompts is { Count: > 0 }))
        {
            log("🎯 Running selectivity test (standalone)...");
            var selectivityResult = await ExecuteSelectivityTest(skill, config, spinner);
            log($"🎯 Selectivity: recall={selectivityResult.Recall:P0}, precision={selectivityResult.Precision:P0} — {(selectivityResult.Passed ? "PASSED" : "FAILED")}");
            return new SkillVerdict
            {
                SkillName = skill.Name,
                SkillPath = skill.Path,
                Passed = selectivityResult.Passed,
                Scenarios = [],
                OverallImprovementScore = 0,
                Reason = selectivityResult.Passed
                    ? "Selectivity test passed"
                    : $"Selectivity test failed: {selectivityResult.Reason}",
                FailureKind = selectivityResult.Passed ? null : "selectivity_failure",
                ProfileWarnings = profile.Warnings,
                SelectivityResult = selectivityResult,
            };
        }
        log("⏭ Skipping (no selectivity prompts in eval.yaml)");
        return null;
    }
    // Launch overfitting check in parallel with scenario execution
    var workDir = Path.GetTempPath();
    // Defaults to a completed null task so the later await is a no-op when disabled.
    Task<OverfittingResult?> overfittingTask = Task.FromResult<OverfittingResult?>(null);
    if (config.OverfittingCheck && skill.EvalConfig is not null)
    {
        log("🔍 Running overfitting check (parallel)...");
        overfittingTask = Services.OverfittingJudge.Analyze(skill, new OverfittingJudgeOptions(
            config.JudgeModel, config.Verbose, config.JudgeTimeout, workDir));
    }
    bool singleScenario = skill.EvalConfig!.Scenarios.Count == 1;
    using var scenarioLimit = new ConcurrencyLimiter(config.ParallelScenarios);
    var scenarioTasks = skill.EvalConfig.Scenarios.Select(scenario =>
        scenarioLimit.RunAsync(() => ExecuteScenario(scenario, skill, config, usePairwise, singleScenario, spinner)));
    var comparisons = (await Task.WhenAll(scenarioTasks)).ToList();
    // Await overfitting result (non-fatal — never blocks an otherwise-successful evaluation)
    OverfittingResult? overfittingResult = null;
    try
    {
        overfittingResult = await overfittingTask;
        if (overfittingResult is not null)
            log($"🔍 Overfitting: {overfittingResult.Score:F2} ({overfittingResult.Severity})");
    }
    catch (Exception ex)
    {
        log($"⚠️ Overfitting check failed: {ex.Message}");
    }
    var verdict = Comparator.ComputeVerdict(skill, comparisons, config.MinImprovement, config.RequireCompletion, config.ConfidenceLevel);
    verdict.ProfileWarnings = profile.Warnings;
    verdict.OverfittingResult = overfittingResult;
    // Optional: generate fixed eval.yaml (only when the analysis flagged more than Low severity).
    if (config.OverfittingFix && overfittingResult is { Severity: not OverfittingSeverity.Low })
    {
        try
        {
            await Services.OverfittingJudge.GenerateFix(skill, overfittingResult, new OverfittingJudgeOptions(
                config.JudgeModel, config.Verbose, config.JudgeTimeout, workDir));
            log("📝 Generated eval.fixed.yaml with suggested improvements");
        }
        catch (Exception ex)
        {
            // Fix generation is best-effort; a failure never affects the verdict.
            log($"⚠️ Failed to generate overfitting fix: {ex.Message}");
        }
    }
    var notActivated = comparisons.Where(c => c.SkillActivation is { Activated: false }).ToList();
    // Separate unexpected non-activations (expect_activation defaulting to true)
    // from expected ones (negative tests with expect_activation: false).
    var unexpectedNotActivated = notActivated.Where(c => c.ExpectActivation).ToList();
    var expectedNotActivated = notActivated.Where(c => !c.ExpectActivation).ToList();
    if (expectedNotActivated.Count > 0)
    {
        var names = string.Join(", ", expectedNotActivated.Select(c => c.ScenarioName));
        log($"\x1b[36mℹ️ Skill correctly NOT activated in negative-test scenario(s): {names}\x1b[0m");
    }
    if (unexpectedNotActivated.Count > 0)
    {
        // A skill that never fired cannot have caused the measured improvement — fail the verdict.
        var names = string.Join(", ", unexpectedNotActivated.Select(c => c.ScenarioName));
        log($"\x1b[33m\u26a0\ufe0f Skill was NOT activated in scenario(s): {names}\x1b[0m");
        verdict.SkillNotActivated = true;
        verdict.Passed = false;
        verdict.FailureKind = "skill_not_activated";
        verdict.Reason += $" [SKILL NOT ACTIVATED in {unexpectedNotActivated.Count} scenario(s): {names}]";
    }
    // Timeouts are surfaced as a warning only; they do not change the verdict here.
    var timedOutScenarios = comparisons.Where(c => c.TimedOut).ToList();
    if (timedOutScenarios.Count > 0)
    {
        var names = string.Join(", ", timedOutScenarios.Select(c => c.ScenarioName));
        log($"\x1b[33m⏰ Execution timed out in scenario(s): {names}\x1b[0m");
    }
    log($"{(verdict.Passed ? "✅" : "❌")} Done (score: {verdict.OverallImprovementScore * 100:F1}%)");
    return verdict;
}
/// <summary>
/// Executes all runs of a single scenario (bounded by --parallel-runs),
/// then builds one aggregated ScenarioComparison: per-run improvement scores,
/// averaged metrics/judge results, the preferred pairwise result, merged skill
/// activation info, and timeout/expect_activation flags.
/// </summary>
private static async Task<ScenarioComparison> ExecuteScenario(
    EvalScenario scenario,
    SkillInfo skill,
    ValidatorConfig config,
    bool usePairwise,
    bool singleScenario,
    Spinner spinner)
{
    // With a single scenario the log tag collapses to just the skill name.
    var tag = singleScenario ? $"[{skill.Name}]" : $"[{skill.Name}/{scenario.Name}]";
    var scenarioLog = (string msg) => spinner.Log($"{tag} {msg}");
    using var runLimit = new ConcurrencyLimiter(config.ParallelRuns);
    if (!singleScenario)
        scenarioLog("📋 Starting scenario");
    var runTasks = Enumerable.Range(0, config.Runs).Select(i =>
        runLimit.RunAsync(() => ExecuteRun(i, scenario, skill, config, usePairwise, singleScenario, spinner)));
    var runResults = await Task.WhenAll(runTasks);
    scenarioLog($"✓ All {config.Runs} run(s) complete");
    var baselineRuns = runResults.Select(r => r.Baseline).ToList();
    var withSkillRuns = runResults.Select(r => r.WithSkill).ToList();
    var perRunPairwise = runResults.Select(r => r.Pairwise).ToList();
    // Per-run improvement scores feed the statistical interval downstream.
    var perRunScores = new List<double>();
    for (int i = 0; i < baselineRuns.Count; i++)
    {
        var runComparison = Comparator.CompareScenario(scenario.Name, baselineRuns[i], withSkillRuns[i], perRunPairwise[i]);
        perRunScores.Add(runComparison.ImprovementScore);
    }
    var avgBaseline = AverageResults(baselineRuns);
    var avgWithSkill = AverageResults(withSkillRuns);
    // Prefer a pairwise result that stayed consistent under position swap;
    // fall back to the first available one (possibly null).
    var bestPairwise = perRunPairwise.FirstOrDefault(pw => pw?.PositionSwapConsistent == true)
        ?? perRunPairwise.FirstOrDefault();
    var comparison = Comparator.CompareScenario(scenario.Name, avgBaseline, avgWithSkill, bestPairwise);
    comparison.PerRunScores = perRunScores;
    // Aggregate skill activation info: activated if ANY run activated; union the details.
    var allActivations = runResults.Select(r => r.SkillActivation).ToList();
    comparison.SkillActivation = new SkillActivationInfo(
        Activated: allActivations.Any(a => a.Activated),
        DetectedSkills: allActivations.SelectMany(a => a.DetectedSkills).Distinct().ToList(),
        ExtraTools: allActivations.SelectMany(a => a.ExtraTools).Distinct().ToList(),
        SkillEventCount: allActivations.Sum(a => a.SkillEventCount));
    // Propagate timeout info from any run (either side of the pair counts).
    comparison.TimedOut = runResults.Any(r => r.WithSkill.Metrics.TimedOut || r.Baseline.Metrics.TimedOut);
    // Propagate expect_activation from scenario config
    comparison.ExpectActivation = scenario.ExpectActivation;
    return comparison;
}
/// <summary>
/// Outcome of one paired run: the baseline (no-skill) result, the with-skill
/// result, the optional pairwise judge comparison between them, and the skill
/// activation telemetry extracted from the with-skill run.
/// </summary>
private sealed record RunExecutionResult(
    RunResult Baseline,
    RunResult WithSkill,
    PairwiseJudgeResult? Pairwise,
    SkillActivationInfo SkillActivation);
/// <summary>
/// Executes one run of a scenario: baseline and with-skill agents in parallel,
/// then assertions, constraints, task-completion scoring, independent judging
/// (with fallback scores on judge failure), optional pairwise judging, and
/// skill-activation detection.
/// </summary>
private static async Task<RunExecutionResult> ExecuteRun(
    int runIndex,
    EvalScenario scenario,
    SkillInfo skill,
    ValidatorConfig config,
    bool usePairwise,
    bool singleScenario,
    Spinner spinner)
{
    // Log tag omits the run index for single runs and the scenario for single scenarios.
    var runTag = config.Runs > 1
        ? (singleScenario ? $"[{skill.Name}/{runIndex + 1}]" : $"[{skill.Name}/{scenario.Name}/{runIndex + 1}]")
        : (singleScenario ? $"[{skill.Name}]" : $"[{skill.Name}/{scenario.Name}]");
    var runLog = (string msg) => spinner.Log($"{runTag} {msg}");
    if (config.Verbose)
        runLog("running agents...");
    // Baseline run passes null for the skill; both agents execute concurrently.
    var agentTasks = await Task.WhenAll(
        AgentRunner.RunAgent(new RunOptions(scenario, null, skill.EvalPath, config.Model, config.Verbose, Log: runLog)),
        AgentRunner.RunAgent(new RunOptions(scenario, skill, skill.EvalPath, config.Model, config.Verbose, Log: runLog)));
    var baselineMetrics = agentTasks[0];
    var withSkillMetrics = agentTasks[1];
    // Evaluate assertions (only when the scenario declares any).
    if (scenario.Assertions is { Count: > 0 })
    {
        baselineMetrics.AssertionResults = await AssertionEvaluator.EvaluateAssertions(scenario.Assertions, baselineMetrics.AgentOutput, baselineMetrics.WorkDir);
        withSkillMetrics.AssertionResults = await AssertionEvaluator.EvaluateAssertions(scenario.Assertions, withSkillMetrics.AgentOutput, withSkillMetrics.WorkDir);
    }
    // Evaluate constraints and append them to the assertion results.
    var baselineConstraints = AssertionEvaluator.EvaluateConstraints(scenario, baselineMetrics);
    var withSkillConstraints = AssertionEvaluator.EvaluateConstraints(scenario, withSkillMetrics);
    baselineMetrics.AssertionResults = [..baselineMetrics.AssertionResults, ..baselineConstraints];
    withSkillMetrics.AssertionResults = [..withSkillMetrics.AssertionResults, ..withSkillConstraints];
    // Task completion: all assertions/constraints pass when any exist;
    // otherwise fall back to "no errors during the run".
    if (scenario.Assertions is { Count: > 0 } || baselineConstraints.Count > 0)
    {
        baselineMetrics.TaskCompleted = baselineMetrics.AssertionResults.All(a => a.Passed);
        withSkillMetrics.TaskCompleted = withSkillMetrics.AssertionResults.All(a => a.Passed);
    }
    else
    {
        baselineMetrics.TaskCompleted = baselineMetrics.ErrorCount == 0;
        withSkillMetrics.TaskCompleted = withSkillMetrics.ErrorCount == 0;
    }
    // Judge — failures are non-fatal so a single timeout doesn't kill the whole evaluation.
    // Await each judge independently so a failure in one doesn't discard the other's result.
    var judgeOpts = new JudgeOptions(config.JudgeModel, config.Verbose, config.JudgeTimeout, baselineMetrics.WorkDir, skill.Path);
    var baselineJudgeTask = Services.Judge.JudgeRun(scenario, baselineMetrics, judgeOpts);
    var withSkillJudgeTask = Services.Judge.JudgeRun(
        scenario, withSkillMetrics, judgeOpts with { WorkDir = withSkillMetrics.WorkDir });
    JudgeResult baselineJudge;
    try
    {
        baselineJudge = await baselineJudgeTask;
    }
    catch (Exception error)
    {
        // Fallback: neutral overall score of 3 with no rubric scores.
        var shortMsg = SanitizeErrorMessage(error.Message);
        runLog($"\x1b[33m⚠️ Judge (baseline) failed, using fallback scores: {shortMsg}\x1b[0m");
        baselineJudge = new JudgeResult([], 3, $"Judge failed: {shortMsg}");
    }
    JudgeResult withSkillJudge;
    try
    {
        withSkillJudge = await withSkillJudgeTask;
    }
    catch (Exception error)
    {
        var shortMsg = SanitizeErrorMessage(error.Message);
        runLog($"\x1b[33m⚠️ Judge (with skill) failed, using fallback scores: {shortMsg}\x1b[0m");
        withSkillJudge = new JudgeResult([], 3, $"Judge failed: {shortMsg}");
    }
    var baseline = new RunResult(baselineMetrics, baselineJudge);
    var withSkillResult = new RunResult(withSkillMetrics, withSkillJudge);
    // Pairwise judging — also best-effort; a failure leaves pairwise null.
    PairwiseJudgeResult? pairwise = null;
    if (usePairwise)
    {
        try
        {
            pairwise = await Services.PairwiseJudge.Judge(
                scenario, baselineMetrics, withSkillMetrics,
                new PairwiseJudgeOptions(config.JudgeModel, config.Verbose, config.JudgeTimeout, baselineMetrics.WorkDir, skill.Path));
        }
        catch (Exception error)
        {
            runLog($"⚠️ Pairwise judge failed: {error}");
        }
    }
    // Skill activation: detected from with-skill events, compared against the
    // baseline tool-call breakdown to spot tools only the skill introduced.
    var skillActivation = MetricsCollector.ExtractSkillActivation(withSkillMetrics.Events, baselineMetrics.ToolCallBreakdown);
    if (skillActivation.Activated)
    {
        var parts = new List<string>();
        if (skillActivation.DetectedSkills.Count > 0) parts.Add($"skills: {string.Join(", ", skillActivation.DetectedSkills)}");
        if (skillActivation.ExtraTools.Count > 0) parts.Add($"extra tools: {string.Join(", ", skillActivation.ExtraTools)}");
        runLog($"🔌 Skill activated ({string.Join("; ", parts)})");
    }
    else
    {
        runLog("\x1b[33m⚠️ Skill was NOT activated during this run\x1b[0m");
    }
    if (config.Verbose)
        runLog("✓ complete");
    return new RunExecutionResult(baseline, withSkillResult, pairwise, skillActivation);
}
/// <summary>
/// Collapses multiple runs of the same configuration into one representative
/// RunResult: numeric metrics and judge scores are averaged; non-numeric
/// fields are taken from a single run (see inline notes).
/// </summary>
private static RunResult AverageResults(List<RunResult> runs)
{
    // Single run: nothing to average.
    if (runs.Count == 1) return runs[0];
    static double Avg(IEnumerable<double> nums) => nums.Average();
    static int AvgRound(IEnumerable<int> nums) => (int)Math.Round(nums.Average());
    var avgMetrics = new RunMetrics
    {
        TokenEstimate = AvgRound(runs.Select(r => r.Metrics.TokenEstimate)),
        ToolCallCount = AvgRound(runs.Select(r => r.Metrics.ToolCallCount)),
        // NOTE(review): breakdown comes from the FIRST run while output/events/
        // workdir come from the LAST — presumably a representative-sample choice,
        // but the asymmetry looks accidental; confirm it is intentional.
        ToolCallBreakdown = runs[0].Metrics.ToolCallBreakdown,
        TurnCount = AvgRound(runs.Select(r => r.Metrics.TurnCount)),
        WallTimeMs = (long)Math.Round(runs.Average(r => r.Metrics.WallTimeMs)),
        ErrorCount = AvgRound(runs.Select(r => r.Metrics.ErrorCount)),
        // A timeout in any run marks the aggregate as timed out.
        TimedOut = runs.Any(r => r.Metrics.TimedOut),
        AssertionResults = runs[^1].Metrics.AssertionResults,
        // Completed if ANY run completed (optimistic aggregation).
        TaskCompleted = runs.Any(r => r.Metrics.TaskCompleted),
        AgentOutput = runs[^1].Metrics.AgentOutput,
        Events = runs[^1].Metrics.Events,
        WorkDir = runs[^1].Metrics.WorkDir,
    };
    // Rubric scores: use the first run's criteria as the template and average each
    // criterion by index across runs; a run missing that index contributes the
    // neutral score 3. Scores are rounded to one decimal place.
    var avgJudge = new JudgeResult(
        runs[0].JudgeResult.RubricScores.Select((s, i) => new RubricScore(
            s.Criterion,
            Math.Round(Avg(runs.Select(r => i < r.JudgeResult.RubricScores.Count ? r.JudgeResult.RubricScores[i].Score : 3)) * 10) / 10,
            s.Reasoning)).ToList(),
        Math.Round(Avg(runs.Select(r => r.JudgeResult.OverallScore)) * 10) / 10,
        runs[^1].JudgeResult.OverallReasoning);
    return new RunResult(avgMetrics, avgJudge);
}
/// <summary>
/// Collapses multiline error messages to single-line and truncates to a reasonable
/// length so they don't bloat console/markdown reports. Null maps to "unknown error".
/// </summary>
private static string SanitizeErrorMessage(string? message)
{
    const int MaxLength = 150;

    // Flatten every platform line ending to a single space.
    var flattened = (message ?? "unknown error").ReplaceLineEndings(" ");
    if (flattened.Length <= MaxLength)
        return flattened;

    // Over-long messages keep the first 150 characters plus an ellipsis.
    return flattened[..MaxLength] + "…";
}
/// <summary>
/// Runs the standalone selectivity test: probes the skill with every
/// should_activate and should_not_activate prompt concurrently, then scores
/// recall (activation on positive prompts) and precision (non-activation on
/// negative prompts) against the configured thresholds.
/// </summary>
private static async Task<SelectivityResult> ExecuteSelectivityTest(SkillInfo skill, ValidatorConfig config, Spinner spinner)
{
    var tag = $"[{skill.Name}/selectivity]";
    var log = (string msg) => spinner.Log($"{tag} {msg}");

    // One activation probe per prompt; all probes run concurrently.
    var probes = new List<Task<SelectivityPromptResult>>();

    void QueueProbes(IEnumerable<string>? prompts, bool shouldActivate, string label)
    {
        if (prompts is null)
            return;
        foreach (var prompt in prompts)
        {
            log($"Testing {label}: \"{Truncate(prompt, 60)}\"");
            probes.Add(ProbeAndLog(skill, prompt, expectedActivation: shouldActivate, config, log));
        }
    }

    QueueProbes(skill.EvalConfig!.ShouldActivatePrompts, shouldActivate: true, "should_activate");
    QueueProbes(skill.EvalConfig.ShouldNotActivatePrompts, shouldActivate: false, "should_not_activate");

    var results = (await Task.WhenAll(probes)).ToList();

    // Recall: fraction of should_activate prompts where the skill actually fired.
    // An empty prompt set counts as a perfect 1.0.
    var positives = results.Where(r => r.ExpectedActivation).ToList();
    double recall = positives.Count > 0
        ? (double)positives.Count(r => r.SkillActivated) / positives.Count
        : 1.0;

    // Precision: fraction of should_not_activate prompts where the skill correctly stayed quiet.
    var negatives = results.Where(r => !r.ExpectedActivation).ToList();
    double precision = negatives.Count > 0
        ? (double)negatives.Count(r => !r.SkillActivated) / negatives.Count
        : 1.0;

    bool passed = recall >= config.SelectivityMinRecall && precision >= config.SelectivityMinPrecision;

    var failureReasons = new List<string>();
    if (recall < config.SelectivityMinRecall)
        failureReasons.Add($"Recall {recall:P0} below threshold {config.SelectivityMinRecall:P0}");
    if (precision < config.SelectivityMinPrecision)
        failureReasons.Add($"Precision {precision:P0} below threshold {config.SelectivityMinPrecision:P0}");

    string reason = passed ? "Selectivity test passed" : string.Join("; ", failureReasons);
    return new SelectivityResult(results, recall, precision, passed, reason);
}
/// <summary>
/// Probes whether the skill activates for one prompt, logs the outcome
/// relative to the expectation, and packages the result.
/// </summary>
private static async Task<SelectivityPromptResult> ProbeAndLog(
    SkillInfo skill, string prompt, bool expectedActivation, ValidatorConfig config, Action<string> log)
{
    var activated = await TestSkillActivation(skill, prompt, config);

    // Outcome label depends on both the expectation and what actually happened.
    var outcome = (expectedActivation, activated) switch
    {
        (true, true) => "✅ activated",
        (true, false) => "❌ NOT activated",
        (false, true) => "❌ activated (unexpected)",
        (false, false) => "✅ correctly NOT activated",
    };
    log($" → {outcome}: \"{Truncate(prompt, 50)}\"");

    return new SelectivityPromptResult(prompt, ExpectedActivation: expectedActivation, SkillActivated: activated);
}
/// <summary>
/// Asks the agent runner whether the skill would activate for a single prompt,
/// using a throwaway probe scenario with an empty rubric and a short timeout.
/// </summary>
private static async Task<bool> TestSkillActivation(SkillInfo skill, string prompt, ValidatorConfig config)
{
    var probeScenario = new EvalScenario(Name: "selectivity-probe", Prompt: prompt, Rubric: [], Timeout: 15);
    var options = new RunOptions(probeScenario, skill, skill.EvalPath, config.Model, config.Verbose);
    return await AgentRunner.ProbeSkillActivation(options);
}
/// <summary>
/// Truncates a string to at most maxLength characters, replacing the tail
/// with a single ellipsis character when cut.
/// </summary>
private static string Truncate(string value, int maxLength)
{
    // Nothing to do when the text already fits.
    if (value.Length <= maxLength)
        return value;

    // Reserve one character for the ellipsis so the result is exactly maxLength long.
    return value[..(maxLength - 1)] + "…";
}
}