@@ -24,18 +24,18 @@ package main
 import (
 	"context"
 	"fmt"
-	gogpt "github.com/sashabaranov/go-openai"
+	openai "github.com/sashabaranov/go-openai"
 )
 
 func main() {
-	client := gogpt.NewClient("your token")
+	client := openai.NewClient("your token")
 	resp, err := client.CreateChatCompletion(
 		context.Background(),
-		gogpt.ChatCompletionRequest{
-			Model: gogpt.GPT3Dot5Turbo,
-			Messages: []gogpt.ChatCompletionMessage{
+		openai.ChatCompletionRequest{
+			Model: openai.GPT3Dot5Turbo,
+			Messages: []openai.ChatCompletionMessage{
 				{
-					Role:    "user",
+					Role:    openai.ChatMessageRoleUser,
 					Content: "Hello!",
 				},
 			},
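For reference, the post-rename chat call reads the assistant reply from `resp.Choices[0].Message.Content`. A minimal self-contained sketch of the full example, with illustrative error handling that is not part of this hunk:

```go
package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your token")
	resp, err := client.CreateChatCompletion(
		context.Background(),
		openai.ChatCompletionRequest{
			Model: openai.GPT3Dot5Turbo,
			Messages: []openai.ChatCompletionMessage{
				{Role: openai.ChatMessageRoleUser, Content: "Hello!"},
			},
		},
	)
	if err != nil {
		// Illustrative error handling; adapt as needed.
		fmt.Printf("ChatCompletion error: %v\n", err)
		return
	}
	// The first choice holds the assistant's reply.
	fmt.Println(resp.Choices[0].Message.Content)
}
```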
@@ -64,15 +64,15 @@ package main
 import (
 	"context"
 	"fmt"
-	gogpt "github.com/sashabaranov/go-openai"
+	openai "github.com/sashabaranov/go-openai"
 )
 
 func main() {
-	c := gogpt.NewClient("your token")
+	c := openai.NewClient("your token")
 	ctx := context.Background()
 
-	req := gogpt.CompletionRequest{
-		Model:     gogpt.GPT3Ada,
+	req := openai.CompletionRequest{
+		Model:     openai.GPT3Ada,
 		MaxTokens: 5,
 		Prompt:    "Lorem ipsum",
 	}
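The request built above is sent with `CreateCompletion`, and the generated text comes back in `resp.Choices[0].Text`. A self-contained sketch of the full post-rename example (error handling is illustrative, not part of this commit):

```go
package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	c := openai.NewClient("your token")
	ctx := context.Background()

	req := openai.CompletionRequest{
		Model:     openai.GPT3Ada,
		MaxTokens: 5,
		Prompt:    "Lorem ipsum",
	}
	resp, err := c.CreateCompletion(ctx, req)
	if err != nil {
		// Illustrative error handling; adapt as needed.
		fmt.Printf("Completion error: %v\n", err)
		return
	}
	// The generated text lives in the first choice.
	fmt.Println(resp.Choices[0].Text)
}
```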
@@ -96,15 +96,15 @@ import (
 	"context"
 	"fmt"
 	"io"
-	gogpt "github.com/sashabaranov/go-openai"
+	openai "github.com/sashabaranov/go-openai"
 )
 
 func main() {
-	c := gogpt.NewClient("your token")
+	c := openai.NewClient("your token")
 	ctx := context.Background()
 
-	req := gogpt.CompletionRequest{
-		Model:     gogpt.GPT3Ada,
+	req := openai.CompletionRequest{
+		Model:     openai.GPT3Ada,
 		MaxTokens: 5,
 		Prompt:    "Lorem ipsum",
 		Stream:    true,
@@ -146,15 +146,15 @@ import (
 	"context"
 	"fmt"
 	"io"
-	gogpt "github.com/sashabaranov/go-openai"
+	openai "github.com/sashabaranov/go-openai"
 )
 
 func main() {
-	c := gogpt.NewClient("your token")
+	c := openai.NewClient("your token")
 	ctx := context.Background()
 
-	req := gogpt.CompletionRequest{
-		Model:     gogpt.GPT3Ada,
+	req := openai.CompletionRequest{
+		Model:     openai.GPT3Ada,
 		MaxTokens: 5,
 		Prompt:    "Lorem ipsum",
 		Stream:    true,
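With `Stream: true`, the request is consumed through `CreateCompletionStream`, reading chunks with `Recv()` until `io.EOF`. A self-contained sketch of the post-rename streaming example (the `Recv` loop and error handling are illustrative):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"io"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	c := openai.NewClient("your token")
	ctx := context.Background()

	req := openai.CompletionRequest{
		Model:     openai.GPT3Ada,
		MaxTokens: 5,
		Prompt:    "Lorem ipsum",
		Stream:    true,
	}

	stream, err := c.CreateCompletionStream(ctx, req)
	if err != nil {
		fmt.Printf("CompletionStream error: %v\n", err)
		return
	}
	defer stream.Close()

	for {
		// Recv blocks until the next chunk arrives or the stream ends.
		response, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			fmt.Println("Stream finished")
			return
		}
		if err != nil {
			fmt.Printf("Stream error: %v\n", err)
			return
		}
		fmt.Printf("Stream response: %v\n", response)
	}
}
```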