-import { describe, it, expect, afterEach } from "vitest";
+import { describe, it, expect, beforeAll, afterAll } from "vitest";
 import * as http from "node:http";
 import type { Fixture } from "../types.js";
 import { createServer, type ServerInstance } from "../server.js";
 import { buildBedrockStreamTextEvents } from "../bedrock.js";

 // --- helpers ---

+let instance: ServerInstance;
+let baseUrl: string;
+
 function post(
-  url: string,
+  path: string,
   body: unknown,
 ): Promise<{ status: number; headers: http.IncomingHttpHeaders; body: string }> {
   return new Promise((resolve, reject) => {
     const data = JSON.stringify(body);
-    const parsed = new URL(url);
+    const parsed = new URL(baseUrl);
     const req = http.request(
       {
         hostname: parsed.hostname,
         port: parsed.port,
-        path: parsed.pathname,
+        path,
         method: "POST",
         headers: {
           "Content-Type": "application/json",
@@ -78,25 +81,24 @@ const plainFixture: Fixture = {

 const allFixtures: Fixture[] = [reasoningFixture, plainFixture];

-// --- tests ---
+// --- server lifecycle ---

-let instance: ServerInstance | null = null;
+beforeAll(async () => {
+  instance = await createServer(allFixtures);
+  baseUrl = instance.url;
+});

-afterEach(async () => {
-  if (instance) {
-    await new Promise<void>((resolve) => {
-      instance!.server.close(() => resolve());
-    });
-    instance = null;
-  }
+afterAll(async () => {
+  await new Promise<void>((resolve) => {
+    instance.server.close(() => resolve());
+  });
 });

 // ─── OpenAI Chat Completions: Reasoning ─────────────────────────────────────

 describe("POST /v1/chat/completions (reasoning non-streaming)", () => {
   it("includes reasoning_content field on assistant message", async () => {
-    instance = await createServer(allFixtures);
-    const res = await post(`${instance.url}/v1/chat/completions`, {
+    const res = await post(`/v1/chat/completions`, {
       model: "gpt-4",
       messages: [{ role: "user", content: "think" }],
       stream: false,
@@ -111,8 +113,7 @@ describe("POST /v1/chat/completions (reasoning non-streaming)", () => {
   });

   it("omits reasoning_content when reasoning is absent", async () => {
-    instance = await createServer(allFixtures);
-    const res = await post(`${instance.url}/v1/chat/completions`, {
+    const res = await post(`/v1/chat/completions`, {
       model: "gpt-4",
       messages: [{ role: "user", content: "plain" }],
       stream: false,
@@ -126,8 +127,7 @@ describe("POST /v1/chat/completions (reasoning non-streaming)", () => {

 describe("POST /v1/chat/completions (reasoning streaming)", () => {
   it("emits reasoning_content deltas before content deltas", async () => {
-    instance = await createServer(allFixtures);
-    const res = await post(`${instance.url}/v1/chat/completions`, {
+    const res = await post(`/v1/chat/completions`, {
       model: "gpt-4",
       messages: [{ role: "user", content: "think" }],
       stream: true,
@@ -159,8 +159,7 @@ describe("POST /v1/chat/completions (reasoning streaming)", () => {
   });

   it("no reasoning_content deltas when reasoning is absent", async () => {
-    instance = await createServer(allFixtures);
-    const res = await post(`${instance.url}/v1/chat/completions`, {
+    const res = await post(`/v1/chat/completions`, {
       model: "gpt-4",
       messages: [{ role: "user", content: "plain" }],
       stream: true,
@@ -188,8 +187,7 @@ function parseGeminiSSEChunks(body: string): unknown[] {

 describe("POST /v1beta/models/{model}:generateContent (reasoning non-streaming)", () => {
   it("includes thought part before text part", async () => {
-    instance = await createServer(allFixtures);
-    const res = await post(`${instance.url}/v1beta/models/gemini-2.5-flash:generateContent`, {
+    const res = await post(`/v1beta/models/gemini-2.5-flash:generateContent`, {
       contents: [{ role: "user", parts: [{ text: "think" }] }],
     });

@@ -204,8 +202,7 @@ describe("POST /v1beta/models/{model}:generateContent (reasoning non-streaming)"
   });

   it("no thought part when reasoning is absent", async () => {
-    instance = await createServer(allFixtures);
-    const res = await post(`${instance.url}/v1beta/models/gemini-2.5-flash:generateContent`, {
+    const res = await post(`/v1beta/models/gemini-2.5-flash:generateContent`, {
       contents: [{ role: "user", parts: [{ text: "plain" }] }],
     });

@@ -219,8 +216,7 @@ describe("POST /v1beta/models/{model}:generateContent (reasoning non-streaming)"

 describe("POST /v1beta/models/{model}:streamGenerateContent (reasoning streaming)", () => {
   it("streams thought chunks before text chunks", async () => {
-    instance = await createServer(allFixtures);
-    const res = await post(`${instance.url}/v1beta/models/gemini-2.5-flash:streamGenerateContent`, {
+    const res = await post(`/v1beta/models/gemini-2.5-flash:streamGenerateContent`, {
       contents: [{ role: "user", parts: [{ text: "think" }] }],
     });

@@ -251,8 +247,7 @@ describe("POST /v1beta/models/{model}:streamGenerateContent (reasoning streaming
   });

   it("no thought chunks when reasoning is absent", async () => {
-    instance = await createServer(allFixtures);
-    const res = await post(`${instance.url}/v1beta/models/gemini-2.5-flash:streamGenerateContent`, {
+    const res = await post(`/v1beta/models/gemini-2.5-flash:streamGenerateContent`, {
       contents: [{ role: "user", parts: [{ text: "plain" }] }],
     });

@@ -271,8 +266,7 @@ describe("POST /v1beta/models/{model}:streamGenerateContent (reasoning streaming

 describe("POST /model/{id}/invoke (reasoning non-streaming)", () => {
   it("includes thinking content block before text block", async () => {
-    instance = await createServer(allFixtures);
-    const res = await post(`${instance.url}/model/anthropic.claude-3-sonnet-20240229-v1:0/invoke`, {
+    const res = await post(`/model/anthropic.claude-3-sonnet-20240229-v1:0/invoke`, {
       messages: [{ role: "user", content: [{ type: "text", text: "think" }] }],
       max_tokens: 1024,
       anthropic_version: "bedrock-2023-05-31",
@@ -288,8 +282,7 @@ describe("POST /model/{id}/invoke (reasoning non-streaming)", () => {
   });

   it("no thinking block when reasoning is absent", async () => {
-    instance = await createServer(allFixtures);
-    const res = await post(`${instance.url}/model/anthropic.claude-3-sonnet-20240229-v1:0/invoke`, {
+    const res = await post(`/model/anthropic.claude-3-sonnet-20240229-v1:0/invoke`, {
       messages: [{ role: "user", content: [{ type: "text", text: "plain" }] }],
       max_tokens: 1024,
       anthropic_version: "bedrock-2023-05-31",
@@ -305,13 +298,9 @@ describe("POST /model/{id}/invoke (reasoning non-streaming)", () => {

 describe("POST /model/{id}/converse (reasoning non-streaming)", () => {
   it("includes reasoningContent block before text block", async () => {
-    instance = await createServer(allFixtures);
-    const res = await post(
-      `${instance.url}/model/anthropic.claude-3-sonnet-20240229-v1:0/converse`,
-      {
-        messages: [{ role: "user", content: [{ text: "think" }] }],
-      },
-    );
+    const res = await post(`/model/anthropic.claude-3-sonnet-20240229-v1:0/converse`, {
+      messages: [{ role: "user", content: [{ text: "think" }] }],
+    });

     expect(res.status).toBe(200);
     const body = JSON.parse(res.body);
@@ -325,13 +314,9 @@ describe("POST /model/{id}/converse (reasoning non-streaming)", () => {
   });

   it("no reasoningContent block when reasoning is absent", async () => {
-    instance = await createServer(allFixtures);
-    const res = await post(
-      `${instance.url}/model/anthropic.claude-3-sonnet-20240229-v1:0/converse`,
-      {
-        messages: [{ role: "user", content: [{ text: "plain" }] }],
-      },
-    );
+    const res = await post(`/model/anthropic.claude-3-sonnet-20240229-v1:0/converse`, {
+      messages: [{ role: "user", content: [{ text: "plain" }] }],
+    });

     const body = JSON.parse(res.body);
     const content = body.output.message.content;
@@ -351,8 +336,7 @@ function parseNDJSON(body: string): object[] {

 describe("POST /api/chat (reasoning non-streaming)", () => {
   it("includes reasoning_content on assistant message", async () => {
-    instance = await createServer(allFixtures);
-    const res = await post(`${instance.url}/api/chat`, {
+    const res = await post(`/api/chat`, {
       model: "deepseek-r1",
       messages: [{ role: "user", content: "think" }],
       stream: false,
@@ -365,8 +349,7 @@ describe("POST /api/chat (reasoning non-streaming)", () => {
   });

   it("omits reasoning_content when reasoning is absent", async () => {
-    instance = await createServer(allFixtures);
-    const res = await post(`${instance.url}/api/chat`, {
+    const res = await post(`/api/chat`, {
       model: "deepseek-r1",
       messages: [{ role: "user", content: "plain" }],
       stream: false,
@@ -380,8 +363,7 @@ describe("POST /api/chat (reasoning non-streaming)", () => {

 describe("POST /api/chat (reasoning streaming)", () => {
   it("streams reasoning_content chunks before content chunks", async () => {
-    instance = await createServer(allFixtures);
-    const res = await post(`${instance.url}/api/chat`, {
+    const res = await post(`/api/chat`, {
       model: "deepseek-r1",
       messages: [{ role: "user", content: "think" }],
       stream: true,
@@ -409,8 +391,7 @@ describe("POST /api/chat (reasoning streaming)", () => {
   });

   it("no reasoning_content chunks when reasoning is absent", async () => {
-    instance = await createServer(allFixtures);
-    const res = await post(`${instance.url}/api/chat`, {
+    const res = await post(`/api/chat`, {
       model: "deepseek-r1",
       messages: [{ role: "user", content: "plain" }],
       stream: true,
@@ -432,8 +413,7 @@ describe("POST /api/chat (reasoning streaming)", () => {

 describe("POST /api/generate (reasoning non-streaming)", () => {
   it("includes reasoning_content field", async () => {
-    instance = await createServer(allFixtures);
-    const res = await post(`${instance.url}/api/generate`, {
+    const res = await post(`/api/generate`, {
       model: "deepseek-r1",
       prompt: "think",
       stream: false,
@@ -446,8 +426,7 @@ describe("POST /api/generate (reasoning non-streaming)", () => {
   });

   it("omits reasoning_content when reasoning is absent", async () => {
-    instance = await createServer(allFixtures);
-    const res = await post(`${instance.url}/api/generate`, {
+    const res = await post(`/api/generate`, {
       model: "deepseek-r1",
       prompt: "plain",
       stream: false,
@@ -461,8 +440,7 @@ describe("POST /api/generate (reasoning non-streaming)", () => {

 describe("POST /api/generate (reasoning streaming)", () => {
   it("streams reasoning_content chunks before response chunks", async () => {
-    instance = await createServer(allFixtures);
-    const res = await post(`${instance.url}/api/generate`, {
+    const res = await post(`/api/generate`, {
       model: "deepseek-r1",
       prompt: "think",
       stream: true,
@@ -489,8 +467,7 @@ describe("POST /api/generate (reasoning streaming)", () => {
   });

   it("no reasoning_content chunks when reasoning is absent", async () => {
-    instance = await createServer(allFixtures);
-    const res = await post(`${instance.url}/api/generate`, {
+    const res = await post(`/api/generate`, {
       model: "deepseek-r1",
       prompt: "plain",
       stream: true,