Skip to content

Commit fd70b10

Browse files
v-jiaodiqiaozha and qiaozha authored
Generate nullable as type alias and resolve recursive reference in union (#2989)
* test keyvault * resolve union circle reference in rlc * support nullable type and named union recursive reference * Update packages/typespec-ts/src/framework/hooks/binder.ts * Update packages/typespec-ts/src/modular/emitModels.ts * fix ci * fix extensible enum description in rlc * fix ci * add uts and remove keyvault in smoke test --------- Co-authored-by: Qiaoqiao Zhang <55688292+qiaozha@users.noreply.github.com> Co-authored-by: Qiaoqiao Zhang <qiaozha@microsoft.com>
1 parent 96725bc commit fd70b10

File tree

19 files changed

+341
-118
lines changed

19 files changed

+341
-118
lines changed

common/config/rush/pnpm-lock.yaml

Lines changed: 18 additions & 16 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

packages/typespec-test/package.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
"@azure-tools/typespec-autorest": ">=0.50.0 <1.0.0",
99
"@typespec/openapi3": ">=0.64.0 <1.0.0",
1010
"@azure-tools/typespec-azure-core": ">=0.50.0 <1.0.0",
11-
"@azure-tools/typespec-client-generator-core": ">=0.50.0 <1.0.0",
11+
"@azure-tools/typespec-client-generator-core": ">=0.50.2 <1.0.0",
1212
"@azure-tools/typespec-azure-resource-manager": ">=0.50.0 <1.0.0",
1313
"@azure-tools/typespec-azure-rulesets": ">=0.50.0 <1.0.0",
1414
"@typespec/compiler": ">=0.64.0 <1.0.0",

packages/typespec-test/test/openai_generic/generated/typespec-ts/review/openai-generic.api.md

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -118,7 +118,7 @@ export interface CreateChatCompletionRequest {
118118
model: "gpt4" | "gpt-4-0314" | "gpt-4-0613" | "gpt-4-32k" | "gpt-4-32k-0314" | "gpt-4-32k-0613" | "gpt-3.5-turbo" | "gpt-3.5-turbo-16k" | "gpt-3.5-turbo-0301" | "gpt-3.5-turbo-0613" | "gpt-3.5-turbo-16k-0613";
119119
n?: number | null;
120120
presence_penalty?: number | null;
121-
stop?: Stop | null;
121+
stop?: Stop;
122122
stream?: boolean | null;
123123
temperature?: number | null;
124124
top_p?: number | null;
@@ -151,8 +151,8 @@ export interface CreateCompletionRequest {
151151
model: "babbage-002" | "davinci-002" | "text-davinci-003" | "text-davinci-002" | "text-davinci-001" | "code-davinci-002" | "text-curie-001" | "text-babbage-001" | "text-ada-001";
152152
n?: number | null;
153153
presence_penalty?: number | null;
154-
prompt: Prompt | null;
155-
stop?: Stop | null;
154+
prompt: Prompt;
155+
stop?: Stop;
156156
stream?: boolean | null;
157157
suffix?: string | null;
158158
temperature?: number | null;
@@ -797,10 +797,16 @@ export interface OpenAIFile {
797797
}
798798

799799
// @public
800-
export type Prompt = string | string[] | number[] | number[][];
800+
export type Prompt = Prompt_1 | null;
801801

802802
// @public
803-
export type Stop = string | string[];
803+
export type Prompt_1 = string | string[] | number[] | number[][];
804+
805+
// @public
806+
export type Stop = Stop_1 | null;
807+
808+
// @public
809+
export type Stop_1 = string | string[];
804810

805811
// (No @packageDocumentation comment for this package)
806812

packages/typespec-test/test/openai_generic/generated/typespec-ts/src/index.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,9 @@ export {
3434
CompletionUsage,
3535
CreateCompletionRequest,
3636
Prompt,
37+
Prompt_1,
3738
Stop,
39+
Stop_1,
3840
CreateCompletionResponse,
3941
CreateFineTuningJobRequest,
4042
FineTuningJob,

packages/typespec-test/test/openai_generic/generated/typespec-ts/src/models/index.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,9 @@ export {
3131
CompletionUsage,
3232
CreateCompletionRequest,
3333
Prompt,
34+
Prompt_1,
3435
Stop,
36+
Stop_1,
3537
CreateCompletionResponse,
3638
CreateFineTuningJobRequest,
3739
FineTuningJob,

packages/typespec-test/test/openai_generic/generated/typespec-ts/src/models/models.ts

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1133,7 +1133,7 @@ export interface CreateCompletionRequest {
11331133
* Note that <|endoftext|> is the document separator that the model sees during training, so if a
11341134
* prompt is not specified the model will generate as if from the beginning of a new document.
11351135
*/
1136-
prompt: Prompt | null;
1136+
prompt: Prompt;
11371137
/** The suffix that comes after a completion of inserted text. */
11381138
suffix?: string | null;
11391139
/**
@@ -1166,7 +1166,7 @@ export interface CreateCompletionRequest {
11661166
*/
11671167
max_tokens?: number | null;
11681168
/** Up to 4 sequences where the API will stop generating further tokens. */
1169-
stop?: Stop | null;
1169+
stop?: Stop;
11701170
/**
11711171
* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear
11721172
* in the text so far, increasing the model's likelihood to talk about new topics.
@@ -1251,14 +1251,18 @@ export function createCompletionRequestSerializer(
12511251
}
12521252

12531253
/** Alias for Prompt */
1254-
export type Prompt = string | string[] | number[] | number[][];
1254+
export type Prompt = Prompt_1 | null;
1255+
/** Alias for Prompt */
1256+
export type Prompt_1 = string | string[] | number[] | number[][];
12551257

12561258
export function promptSerializer(item: Prompt): any {
12571259
return item;
12581260
}
12591261

12601262
/** Alias for Stop */
1261-
export type Stop = string | string[];
1263+
export type Stop = Stop_1 | null;
1264+
/** Alias for Stop */
1265+
export type Stop_1 = string | string[];
12621266

12631267
export function stopSerializer(item: Stop): any {
12641268
return item;
@@ -1747,7 +1751,7 @@ export interface CreateChatCompletionRequest {
17471751
*/
17481752
max_tokens?: number | null;
17491753
/** Up to 4 sequences where the API will stop generating further tokens. */
1750-
stop?: Stop | null;
1754+
stop?: Stop;
17511755
/**
17521756
* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear
17531757
* in the text so far, increasing the model's likelihood to talk about new topics.

packages/typespec-test/test/openai_non_branded/generated/typespec-ts/review/openai-non-branded.api.md

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -118,7 +118,7 @@ export interface CreateChatCompletionRequest {
118118
model: "gpt4" | "gpt-4-0314" | "gpt-4-0613" | "gpt-4-32k" | "gpt-4-32k-0314" | "gpt-4-32k-0613" | "gpt-3.5-turbo" | "gpt-3.5-turbo-16k" | "gpt-3.5-turbo-0301" | "gpt-3.5-turbo-0613" | "gpt-3.5-turbo-16k-0613";
119119
n?: number | null;
120120
presencePenalty?: number | null;
121-
stop?: Stop | null;
121+
stop?: Stop;
122122
stream?: boolean | null;
123123
temperature?: number | null;
124124
topP?: number | null;
@@ -151,8 +151,8 @@ export interface CreateCompletionRequest {
151151
model: "babbage-002" | "davinci-002" | "text-davinci-003" | "text-davinci-002" | "text-davinci-001" | "code-davinci-002" | "text-curie-001" | "text-babbage-001" | "text-ada-001";
152152
n?: number | null;
153153
presencePenalty?: number | null;
154-
prompt: Prompt | null;
155-
stop?: Stop | null;
154+
prompt: Prompt;
155+
stop?: Stop;
156156
stream?: boolean | null;
157157
suffix?: string | null;
158158
temperature?: number | null;
@@ -797,10 +797,16 @@ export interface OpenAIFile {
797797
}
798798

799799
// @public
800-
export type Prompt = string | string[] | number[] | number[][];
800+
export type Prompt = Prompt_1 | null;
801801

802802
// @public
803-
export type Stop = string | string[];
803+
export type Prompt_1 = string | string[] | number[] | number[][];
804+
805+
// @public
806+
export type Stop = Stop_1 | null;
807+
808+
// @public
809+
export type Stop_1 = string | string[];
804810

805811
// (No @packageDocumentation comment for this package)
806812

packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/index.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,9 @@ export {
3333
CompletionUsage,
3434
CreateCompletionRequest,
3535
Prompt,
36+
Prompt_1,
3637
Stop,
38+
Stop_1,
3739
CreateCompletionResponse,
3840
CreateFineTuningJobRequest,
3941
FineTuningJob,

packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/models/index.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,9 @@ export {
3030
CompletionUsage,
3131
CreateCompletionRequest,
3232
Prompt,
33+
Prompt_1,
3334
Stop,
35+
Stop_1,
3436
CreateCompletionResponse,
3537
CreateFineTuningJobRequest,
3638
FineTuningJob,

packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/models/models.ts

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1132,7 +1132,7 @@ export interface CreateCompletionRequest {
11321132
* Note that <|endoftext|> is the document separator that the model sees during training, so if a
11331133
* prompt is not specified the model will generate as if from the beginning of a new document.
11341134
*/
1135-
prompt: Prompt | null;
1135+
prompt: Prompt;
11361136
/** The suffix that comes after a completion of inserted text. */
11371137
suffix?: string | null;
11381138
/**
@@ -1165,7 +1165,7 @@ export interface CreateCompletionRequest {
11651165
*/
11661166
maxTokens?: number | null;
11671167
/** Up to 4 sequences where the API will stop generating further tokens. */
1168-
stop?: Stop | null;
1168+
stop?: Stop;
11691169
/**
11701170
* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear
11711171
* in the text so far, increasing the model's likelihood to talk about new topics.
@@ -1250,14 +1250,18 @@ export function createCompletionRequestSerializer(
12501250
}
12511251

12521252
/** Alias for Prompt */
1253-
export type Prompt = string | string[] | number[] | number[][];
1253+
export type Prompt = Prompt_1 | null;
1254+
/** Alias for Prompt */
1255+
export type Prompt_1 = string | string[] | number[] | number[][];
12541256

12551257
export function promptSerializer(item: Prompt): any {
12561258
return item;
12571259
}
12581260

12591261
/** Alias for Stop */
1260-
export type Stop = string | string[];
1262+
export type Stop = Stop_1 | null;
1263+
/** Alias for Stop */
1264+
export type Stop_1 = string | string[];
12611265

12621266
export function stopSerializer(item: Stop): any {
12631267
return item;
@@ -1746,7 +1750,7 @@ export interface CreateChatCompletionRequest {
17461750
*/
17471751
maxTokens?: number | null;
17481752
/** Up to 4 sequences where the API will stop generating further tokens. */
1749-
stop?: Stop | null;
1753+
stop?: Stop;
17501754
/**
17511755
* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear
17521756
* in the text so far, increasing the model's likelihood to talk about new topics.

0 commit comments

Comments (0)