import {
  BedrockRuntimeClient,
  InvokeModelCommand,
- InvokeModelWithResponseStreamCommand,
- ConverseStreamCommand,
- ConverseCommand,
- ConversationRole,
+ // InvokeModelWithResponseStreamCommand,
+ // ConverseStreamCommand,
+ // ConverseCommand,
+ // ConversationRole,
} from "@aws-sdk/client-bedrock-runtime";
- import {
-   AWSBedrockAnthropicStream,
-   StreamingTextResponse,
-   AWSBedrockLlama2Stream,
-   AWSBedrockCohereStream,
-   AWSBedrockStream,
- } from "ai";
- import { experimental_buildAnthropicPrompt, experimental_buildLlama2Prompt } from "ai/prompts";
+ // import {
+ //   AWSBedrockAnthropicStream,
+ //   StreamingTextResponse,
+ //   AWSBedrockLlama2Stream,
+ //   AWSBedrockCohereStream,
+ //   AWSBedrockStream,
+ // } from "ai";
+ // import { experimental_buildAnthropicPrompt, experimental_buildLlama2Prompt } from "ai/prompts";
import { NextApiRequest, NextApiResponse } from "next";
- import { NextResponse } from "next/server";
import { getServerClient } from "@/utils/ld-server";
-
- import { wait } from "@/utils/utils";
// import { ldClient } from "@/utils/ld-server/serverClient";
import { getCookie } from "cookies-next";
// export const dynamic = "force-dynamic";
// export const runtime = "edge";

+ //https://sdk.vercel.ai/providers/legacy-providers/aws-bedrock
export default async function chatResponse(req: NextApiRequest, res: NextApiResponse) {
  const bedrockClient = new BedrockRuntimeClient({
    region: process.env.AWS_DEFAULT_REGION ?? "us-west-2",
@@ -33,9 +31,9 @@ export default async function chatResponse(req: NextApiRequest, res: NextApiResp
    },
  });
  const messages = req.body;
- console.log("awefawefmessages", messages);
+
  const ldClient = await getServerClient(process.env.LD_SDK_KEY || "");
-
+ console.log("ldClient", ldClient)
  const context: any = getCookie("ld-context") || { "kind": "user", "name": "anonymous", "key": "abc-123" };

  const model = await ldClient.variation("ai-chatbot", context, {
@@ -47,12 +45,12 @@ export default async function chatResponse(req: NextApiRequest, res: NextApiResp
  })

  // Ask Claude for a streaming chat completion given the prompt
- const claudeMessage = [
-   {
-     role: "user",
-     content: "Where is a good vacation place for under $1000? Limit to 100 characters.",
-   },
- ];
+ // const claudeMessage = [
+ //   {
+ //     role: "user",
+ //     content: "Where is a good vacation place for under $1000? Limit to 100 characters.",
+ //   },
+ // ];

  const chatBotModelInput = new InvokeModelCommand({
    modelId: model.modelId,
@@ -67,8 +65,6 @@ export default async function chatResponse(req: NextApiRequest, res: NextApiResp
    }),
  });

-
-
  try {
    const bedrockResponse = await bedrockClient.send(chatBotModelInput);
    const decoder = new TextDecoder();
@@ -79,52 +75,16 @@ export default async function chatResponse(req: NextApiRequest, res: NextApiResp
    throw new Error(error.message);
  }

- // const llamaMessage = [
- //   {
- //     role: "user",
- //     content: "Where is a good vacation place for under $1000? Limit to 100 characters.",
- //   },
- // ];
-
- // const llama = new InvokeModelWithResponseStreamCommand({
- //   modelId: "meta.llama2-13b-chat-v1",
- //   contentType: "application/json",
- //   accept: "application/json",
- //   body: JSON.stringify({
- //     prompt: experimental_buildLlama2Prompt(llamaMessage),
- //     temperature: 0.9,
- //     max_gen_len: 500,
- //     top_p: 1,
- //   }),
- // });
-
- // const bedrockResponse = await bedrockClient.send(llama);
- // const stream = AWSBedrockLlama2Stream(bedrockResponse); // Convert the response into a friendly text-stream
- // return new StreamingTextResponse(stream); // Respond with the stream
-
- // const cohereMessage = [
- //   {
- //     role: "user",
- //     content: "Where is a good vacation place for under $1000? Limit to 100 characters.",
- //   },
- // ];
-
  // const cohere = new InvokeModelWithResponseStreamCommand({
- //   modelId: "cohere.command-text-v14",
- //   contentType: "application/json",
- //   accept: "application/json",
- //   body: JSON.stringify({
- //     prompt: experimental_buildLlama2Prompt(cohereMessage),
- //     temperature: 0.9,
- //     max_tokens: 500,
- //     p: 1,
- //   }),
- // });
-
- // const bedrockResponse = await bedrockClient.send(cohere);
- // const stream = AWSBedrockCohereStream(bedrockResponse); // Convert the response into a friendly text-stream
- // console.log("bedrockResponse", bedrockResponse)
- // console.log("stream", stream)
- // console.log("new StreamingTextResponse(stream)", new StreamingTextResponse(stream))
- // return new StreamingTextResponse(stream); // Respond with the stream
+ //   modelId: "cohere.command-text-v14",
+ //   contentType: "application/json",
+ //   accept: "application/json",
+ //   body: JSON.stringify({
+ //     prompt: experimental_buildLlama2Prompt(cohereMessage),
+ //     temperature: 0.9,
+ //     max_tokens: 500,
+ //     p: 1,
+ //   }),
+ // });
+
}
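
The hunks above elide the rest of the try block. As a rough sketch only (not part of this commit), the non-streaming InvokeModelCommand path could decode and return the Bedrock payload along these lines, assuming the response body is UTF-8 JSON; `jsonString` and `chatBotModelOutput` are hypothetical names, while `decoder`, `bedrockResponse`, and `res` come from the code shown in the diff:

  const jsonString = decoder.decode(bedrockResponse.body); // InvokeModelCommand returns the payload as raw bytes
  const chatBotModelOutput = JSON.parse(jsonString);       // hypothetical variable; the real parsing in this file is not shown here
  res.status(200).json(chatBotModelOutput);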