
Commit 9d44008

feat(core): customizable scoring added via config file and CLI parameter
When the scoring threshold is not reached, the exit code is 1; otherwise it is 0. Scoring tests and README documentation added.
1 parent 8c67e80 · commit 9d44008

30 files changed: +1191 -20 lines

docs/guides/2-cli.md

Lines changed: 87 additions & 0 deletions
@@ -39,6 +39,7 @@ Other options include:
      --stdin-filepath             path to a file to pretend that stdin comes from      [string]
      --resolver                   path to custom json-ref-resolver instance            [string]
  -r, --ruleset                    path/URL to a ruleset file                           [string]
+ -s, --scoring-config             path/URL to a scoring config file                    [string]
  -F, --fail-severity              results of this level or above will trigger a failure exit code
                                   [string] [choices: "error", "warn", "info", "hint"] [default: "error"]
  -D, --display-only-failures      only output results equal to or greater than --fail-severity [boolean] [default: false]
@@ -60,6 +61,92 @@ Here you can build a [custom ruleset](../getting-started/3-rulesets.md), or exte
- [OpenAPI ruleset](../reference/openapi-rules.md)
- [AsyncAPI ruleset](../reference/asyncapi-rules.md)

## Scoring the API

Scoring an API definition is a way to understand, at a high level, how compliant the definition is with the provided rulesets. This helps teams understand the quality of their APIs at the definition level.

The scoring is produced as two different metrics:

- A numeric score, which starts at 100% and is reduced for every error or warning
- A letter, which groups numeric scores into grades from A (best) downwards

It also introduces a quality gate: an API scoring below the configured threshold will fail in a pipeline.

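Because a score below the threshold makes the CLI exit with code 1 (and 0 otherwise), the quality gate can be wired into CI directly. A minimal sketch, with illustrative file names:

```bash
# Exits 1 when the score falls below "threshold" in the scoring config,
# 0 otherwise. openapi.yaml, ruleset.js and scoringFile.json are
# illustrative names, not files from this commit.
if ! spectral lint openapi.yaml -r ruleset.js -s scoringFile.json; then
  echo "API scoring below threshold"
  exit 1
fi
```
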
Scoring is enabled with a new parameter, --scoring-config (or -s), which points to a scoring configuration file where you define how errors and warnings affect the score.

Usage:

```bash
spectral lint ./reference/**/*.oas*.{json,yml,yaml} --ruleset mycustomruleset.js --scoring-config ./scoringFile.json
```

or

```bash
spectral lint ./reference/**/*.oas*.{json,yml,yaml} -r mycustomruleset.js -s ./scoringFile.json
```

Here's an example of a scoring config file:

```json
{
  "scoringSubtract": {
    "error": {
      "1": 55,
      "2": 65,
      "3": 75,
      "6": 85,
      "10": 95
    },
    "warn": {
      "1": 3,
      "2": 7,
      "3": 10,
      "6": 15,
      "10": 18
    }
  },
  "scoringLetter": {
    "A": 75,
    "B": 65,
    "C": 55,
    "D": 45,
    "E": 0
  },
  "threshold": 50,
  "warningsSubtract": true,
  "uniqueErrors": false
}
```

Where:

- `scoringSubtract`: an object with, for each result level to penalize, a map from number of results to the percentage subtracted once that count is reached
- `scoringLetter`: an object mapping each scoring letter to the minimum percentage the score must reach to earn that letter
- `threshold`: the minimum percentage for the checked file to be considered valid
- `warningsSubtract`: whether warnings also subtract from the score in addition to errors, or counting stops at the most critical result level
- `uniqueErrors`: whether to count only unique errors, or all of them

Example:

With the previous scoring config file (see the sketch after this list):

- 1 error: the scoring is 45% and D
- 2 errors: the scoring is 35% and E
- 3 errors: the scoring is 25% and E
- 4 errors: the scoring is 25% and E
- and so on
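
To make the calculation concrete, here is a minimal TypeScript sketch of how a score and letter can be derived from such a config. The helper names are illustrative, not the actual functions in packages/cli/src/formatters/utils, and letter assignment assumes the score must reach a letter's minimum percentage:

```ts
type SubtractMap = Record<string, number>; // result count -> percentage to subtract

interface ScoringConfigSketch {
  scoringSubtract: { error: SubtractMap; warn: SubtractMap };
  scoringLetter: Record<string, number>; // letter -> minimum percentage
  threshold: number;
  warningsSubtract: boolean;
}

// Subtraction for a count: the entry with the highest key that is <= count
// (e.g. 4 errors with keys 1, 2, 3, 6, 10 falls back to key 3).
function subtractFor(count: number, map: SubtractMap): number {
  let subtract = 0;
  for (const key of Object.keys(map).map(Number).sort((a, b) => a - b)) {
    if (count >= key) subtract = map[String(key)];
  }
  return subtract;
}

function computeScore(
  errors: number,
  warnings: number,
  config: ScoringConfigSketch,
): { percent: number; letter: string } {
  let percent = 100 - subtractFor(errors, config.scoringSubtract.error);
  if (config.warningsSubtract) {
    percent -= subtractFor(warnings, config.scoringSubtract.warn);
  }
  if (percent < 0) percent = 0;
  // Highest-valued letter whose minimum the score still reaches.
  const letter =
    Object.entries(config.scoringLetter)
      .sort(([, a], [, b]) => b - a)
      .find(([, min]) => percent >= min)?.[0] ?? 'E';
  return { percent, letter };
}
```

With the example config above, computeScore(1, 0, config) yields 45% and D, and computeScore(2, 0, config) yields 35% and E, matching the list above.
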
Output:

Below your output log you will see the scoring, like:

✖ SCORING: A (93%)
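
With the json formatter, the changes to packages/cli/src/formatters/json.ts below wrap the results in an envelope that carries the scoring. The output takes roughly this shape, with illustrative values and abridged result fields:

```json
{
  "version": "MyCompany Spectral 6.5.0",
  "scoring": "A (93%)",
  "passed": true,
  "results": [
    {
      "code": "operation-description",
      "message": "Operation must have a description.",
      "severity": 1,
      "source": "openapi.yaml"
    }
  ]
}
```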

## Error Results

Spectral has a few different error severities: `error`, `warn`, `info`, and `hint`, and they are in "order" from highest to lowest. By default, all results will be shown regardless of severity, but since v5.0, only the presence of errors will cause a failure status code of 1. Seeing results and getting a failure code for it are now two different things.

packages/cli/src/commands/__tests__/lint.test.ts

Lines changed: 18 additions & 2 deletions
@@ -146,6 +146,22 @@ describe('lint', () => {
       );
     });

+    it('calls lint with document, ruleset and scoring config file', async () => {
+      const doc = './__fixtures__/empty-oas2-document.json';
+      const ruleset = 'custom-ruleset.json';
+      const configFile = 'scoring-config.json';
+      await run(`lint -r ${ruleset} -s ${configFile} ${doc}`);
+      expect(lint).toBeCalledWith([doc], {
+        encoding: 'utf8',
+        format: ['stylish'],
+        output: { stylish: '<stdout>' },
+        ruleset: 'custom-ruleset.json',
+        stdinFilepath: undefined,
+        ignoreUnknownFormat: false,
+        failOnUnmatchedGlobs: false,
+      });
+    });
+
     it.each(['json', 'stylish'])('calls formatOutput with %s format', async format => {
       await run(`lint -f ${format} ./__fixtures__/empty-oas2-document.json`);
       expect(formatOutput).toBeCalledWith(results, format, { failSeverity: DiagnosticSeverity.Error });
@@ -244,13 +260,13 @@ describe('lint', () => {
       expect(process.stderr.write).nthCalledWith(2, `Error #1: ${chalk.red('some unhandled exception')}\n`);
       expect(process.stderr.write).nthCalledWith(
         3,
-        expect.stringContaining(`packages/cli/src/commands/__tests__/lint.test.ts:236`),
+        expect.stringContaining(`packages/cli/src/commands/__tests__/lint.test.ts:252`),
       );

       expect(process.stderr.write).nthCalledWith(4, `Error #2: ${chalk.red('another one')}\n`);
       expect(process.stderr.write).nthCalledWith(
         5,
-        expect.stringContaining(`packages/cli/src/commands/__tests__/lint.test.ts:237`),
+        expect.stringContaining(`packages/cli/src/commands/__tests__/lint.test.ts:253`),
       );

       expect(process.stderr.write).nthCalledWith(6, `Error #3: ${chalk.red('original exception')}\n`);

packages/cli/src/commands/lint.ts

Lines changed: 45 additions & 2 deletions
@@ -14,6 +14,14 @@ import { formatOutput, writeOutput } from '../services/output';
 import { FailSeverity, ILintConfig, OutputFormat } from '../services/config';

 import { CLIError } from '../errors';
+import { ScoringConfig } from '../formatters/types';
+import {
+  getScoringConfig,
+  getScoringLevel,
+  groupBySource,
+  getCountsBySeverity,
+  uniqueErrors,
+} from '../formatters/utils';

 const formatOptions = Object.values(OutputFormat);

@@ -127,6 +135,11 @@ const lintCommand: CommandModule = {
       description: 'path/URL to a ruleset file',
       type: 'string',
     },
+    'scoring-config': {
+      alias: 's',
+      description: 'path/URL to a scoring config file',
+      type: 'string',
+    },
     'fail-severity': {
       alias: 'F',
       description: 'results of this level or above will trigger a failure exit code',
@@ -168,6 +181,7 @@ const lintCommand: CommandModule = {
       failSeverity,
       displayOnlyFailures,
       ruleset,
+      scoringConfig,
       stdinFilepath,
       format,
       output,
@@ -197,20 +211,30 @@ const lintCommand: CommandModule = {
         results = filterResultsBySeverity(results, failSeverity);
       }

+      const scoringConfigData = getScoringConfig(scoringConfig);
+
       await Promise.all(
         format.map(f => {
-          const formattedOutput = formatOutput(results, f, { failSeverity: getDiagnosticSeverity(failSeverity) });
+          const formattedOutput = formatOutput(results, f, {
+            failSeverity: getDiagnosticSeverity(failSeverity),
+            scoringConfig: scoringConfigData,
+          });
           return writeOutput(formattedOutput, output?.[f] ?? '<stdout>');
         }),
       );

       if (results.length > 0) {
-        process.exit(severeEnoughToFail(results, failSeverity) ? 1 : 0);
+        process.exit(
+          scoringThresholdNotEnough(results, scoringConfigData) ? 1 : severeEnoughToFail(results, failSeverity) ? 1 : 0,
+        );
       } else if (config.quiet !== true) {
         const isErrorSeverity = getDiagnosticSeverity(failSeverity) === DiagnosticSeverity.Error;
         process.stdout.write(
           `No results with a severity of '${failSeverity}' ${isErrorSeverity ? '' : 'or higher '}found!\n`,
         );
+        if (scoringConfig !== void 0) {
+          process.stdout.write(`SCORING: (100%)\nPASSED!`);
+        }
       }
     } catch (ex) {
       fail(isError(ex) ? ex : new Error(String(ex)), config.verbose === true);
@@ -273,6 +297,25 @@ const filterResultsBySeverity = (results: IRuleResult[], failSeverity: FailSever
   return results.filter(r => r.severity <= diagnosticSeverity);
 };

+const scoringThresholdNotEnough = (results: IRuleResult[], scoringConfig: ScoringConfig | undefined): boolean => {
+  if (scoringConfig !== void 0) {
+    const groupedResults = groupBySource(results);
+    let groupedUniqueResults = { ...groupedResults };
+    if (scoringConfig.uniqueErrors) {
+      groupedUniqueResults = { ...groupBySource(uniqueErrors(results)) };
+    }
+    return (
+      scoringConfig.threshold >
+      getScoringLevel(
+        getCountsBySeverity(groupedUniqueResults),
+        scoringConfig.scoringSubtract,
+        scoringConfig.warningsSubtract,
+      )
+    );
+  }
+  return false;
+};
+
 export const severeEnoughToFail = (results: IRuleResult[], failSeverity: FailSeverity): boolean => {
   const diagnosticSeverity = getDiagnosticSeverity(failSeverity);
   return results.some(r => r.severity <= diagnosticSeverity);

packages/cli/src/formatters/json.ts

Lines changed: 30 additions & 3 deletions
@@ -1,6 +1,21 @@
-import { Formatter } from './types';
+import { ISpectralDiagnostic } from '@stoplight/spectral-core';
+import { Formatter, FormatterOptions } from './types';

-export const json: Formatter = results => {
+import { groupBySource, uniqueErrors, getCountsBySeverity, getScoringText } from './utils';
+
+const version = process.env.npm_package_version;
+
+export const json: Formatter = (results: ISpectralDiagnostic[], options: FormatterOptions) => {
+  let spectralVersion = '';
+  let groupedResults;
+  let scoringText = '';
+  if (options.scoringConfig !== void 0) {
+    if (options.scoringConfig.customScoring !== undefined) {
+      spectralVersion = `${options.scoringConfig.customScoring} ${version as string}`;
+    }
+    groupedResults = groupBySource(uniqueErrors(results));
+    scoringText = getScoringText(getCountsBySeverity(groupedResults), options.scoringConfig);
+  }
   const outputJson = results.map(result => {
     return {
       code: result.code,
@@ -11,5 +26,17 @@ export const json: Formatter = results => {
       source: result.source,
     };
   });
-  return JSON.stringify(outputJson, null, '\t');
+  let objectOutput;
+  if (options.scoringConfig !== void 0) {
+    const scoring = +(scoringText !== null ? scoringText.replace('%', '').split(/[()]+/)[1] : 0);
+    objectOutput = {
+      version: spectralVersion,
+      scoring: scoringText.replace('SCORING:', '').trim(),
+      passed: scoring >= options.scoringConfig.threshold,
+      results: outputJson,
+    };
+  } else {
+    objectOutput = outputJson;
+  }
+  return JSON.stringify(objectOutput, null, '\t');
 };

packages/cli/src/formatters/pretty.ts

Lines changed: 45 additions & 5 deletions
@@ -24,22 +24,40 @@
  * @author Ava Thorn
  */

+import { ISpectralDiagnostic } from '@stoplight/spectral-core';
 import { printPath, PrintStyle } from '@stoplight/spectral-runtime';
-import { IDiagnostic, IRange } from '@stoplight/types';
+import { IDiagnostic, IRange, DiagnosticSeverity } from '@stoplight/types';
 import chalk from 'chalk';

-import { Formatter } from './types';
-import { getColorForSeverity, getHighestSeverity, getSummary, getSeverityName, groupBySource } from './utils';
+import { Formatter, FormatterOptions } from './types';
+import {
+  getColorForSeverity,
+  getHighestSeverity,
+  getSummary,
+  getSeverityName,
+  groupBySource,
+  getScoringText,
+  getCountsBySeverity,
+  uniqueErrors,
+} from './utils';
+
+const { version } = require('../../package.json');

 function formatRange(range?: IRange): string {
   if (range === void 0) return '';

   return ` ${range.start.line + 1}:${range.start.character + 1}`;
 }

-export const pretty: Formatter = results => {
+export const pretty: Formatter = (results: ISpectralDiagnostic[], options: FormatterOptions) => {
   const cliui = require('cliui');
   let output = '\n';
+  if (options.scoringConfig !== void 0) {
+    if (options.scoringConfig.customScoring !== void 0) {
+      output += `${options.scoringConfig.customScoring}${version as string}\n`;
+    }
+  }
+  output += '\n';
   const DEFAULT_TOTAL_WIDTH = process.stdout.columns;
   const COLUMNS = [10, 13, 25, 20, 20];
   const variableColumns = DEFAULT_TOTAL_WIDTH - COLUMNS.reduce((a, b) => a + b);
@@ -50,10 +68,23 @@ export const pretty: Formatter = results => {
   const PAD_TOP1_LEFT0 = [1, 0, 0, 0];
   const ui = cliui({ width: DEFAULT_TOTAL_WIDTH, wrap: true });

+  const uniqueResults = uniqueErrors(results);
   const groupedResults = groupBySource(results);
-  const summaryColor = getColorForSeverity(getHighestSeverity(results));
+  const summaryColor = getColorForSeverity(getHighestSeverity(uniqueResults));
   const summaryText = getSummary(groupedResults);

+  let groupedUniqueResults = { ...groupedResults };
+  let scoringColor = '';
+  let scoringText = null;
+
+  if (options.scoringConfig !== void 0) {
+    if (options.scoringConfig.uniqueErrors) {
+      groupedUniqueResults = { ...groupBySource(uniqueResults) };
+    }
+    scoringColor = getColorForSeverity(DiagnosticSeverity.Information);
+    scoringText = getScoringText(getCountsBySeverity(groupedUniqueResults), options.scoringConfig);
+  }
+
   const uniqueIssues: IDiagnostic['code'][] = [];
   Object.keys(groupedResults).forEach(i => {
     const pathResults = groupedResults[i];
@@ -83,6 +114,15 @@ export const pretty: Formatter = results => {
   output += ui.toString();
   output += chalk[summaryColor].bold(`${uniqueIssues.length} Unique Issue(s)\n`);
   output += chalk[summaryColor].bold(`\u2716${summaryText !== null ? ` ${summaryText}` : ''}\n`);
+  if (options.scoringConfig !== void 0) {
+    output += chalk[scoringColor].bold(`\u2716${scoringText !== null ? ` ${scoringText}` : ''}\n`);
+    const scoring = +(scoringText !== null ? scoringText.replace('%', '').split(/[()]+/)[1] : 0);
+    if (scoring >= options.scoringConfig.threshold) {
+      output += chalk['green'].bold(`\u2716 PASSED!\n`);
+    } else {
+      output += chalk['red'].bold(`\u2716 NOT PASSED!\n`);
+    }
+  }

   return output;
 };
