@@ -138,8 +138,14 @@ def create(cls, args):
        if args.n is not None and args.n > 1 and args.stream:
            raise ValueError("Can't stream completions with n>1 with the current CLI")

+        if args.engine and args.model:
+            warnings.warn(
+                "In most cases, you should not be specifying both engine and model."
+            )
+
        resp = openai.Completion.create(
            engine=args.engine,
+            model=args.model,
            n=args.n,
            max_tokens=args.max_tokens,
            logprobs=args.logprobs,
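For context, a minimal sketch of the two call paths this hunk distinguishes, assuming the pre-1.0 `openai` Python client that the CLI above wraps; the engine name, model placeholder, and prompt are illustrative, not values taken from the change.

import openai

# Engine-based request: the only path the CLI supported before this change
# ("davinci" is an illustrative engine name).
resp = openai.Completion.create(engine="davinci", prompt="Say this is a test", max_tokens=5)

# Model-based request: now reachable from the CLI because model=args.model is
# forwarded. "<YOUR_FINE_TUNED_MODEL>" is a placeholder for a fine-tuned model id.
resp = openai.Completion.create(
    model="<YOUR_FINE_TUNED_MODEL>", prompt="Say this is a test", max_tokens=5
)

# Supplying both -e/--engine and -m/--model hits the new warnings.warn(...) branch above.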
@@ -253,30 +259,14 @@ def create(cls, args):
            return

        sys.stdout.write(
-            "Created job: {job_id}\n"
-            "Streaming events until the job is complete...\n\n"
-            "(Ctrl-C will interrupt the stream, but not cancel the job)\n".format(
+            "Created fine-tune: {job_id}\n"
+            "Streaming events until fine-tuning is complete...\n\n"
+            "(Ctrl-C will interrupt the stream, but not cancel the fine-tune)\n".format(
                job_id=resp["id"]
            )
        )
        cls._stream_events(resp["id"])

-        resp = openai.FineTune.retrieve(id=resp["id"])
-        status = resp["status"]
-        sys.stdout.write("\nJob complete! Status: {status}".format(status=status))
-        if status == "succeeded":
-            sys.stdout.write(" 🎉")
-            sys.stdout.write(
-                "\nTry out your fine-tuned model: {model}\n"
-                "(Pass this as the model parameter to a completion request)".format(
-                    model=resp["fine_tuned_model"]
-                )
-            )
-            # TODO(rachel): Print instructions on how to use the model here.
-        elif status == "failed":
-            sys.stdout.write("\nPlease contact [email protected] for assistance.")
-        sys.stdout.write("\n")
-
    @classmethod
    def get(cls, args):
        resp = openai.FineTune.retrieve(id=args.id)
@@ -296,8 +286,8 @@ def signal_handler(sig, frame):
            status = openai.FineTune.retrieve(job_id).status
            sys.stdout.write(
                "\nStream interrupted. Job is still {status}. "
-                "To cancel your job, run:\n"
-                "`openai api fine_tunes.cancel -i {job_id}`\n".format(
+                "To cancel your job, run:\n\n"
+                "openai api fine_tunes.cancel -i {job_id}\n".format(
                    status=status, job_id=job_id
                )
            )
@@ -318,6 +308,22 @@ def signal_handler(sig, frame):
            sys.stdout.write("\n")
            sys.stdout.flush()

+        resp = openai.FineTune.retrieve(id=job_id)
+        status = resp["status"]
+        if status == "succeeded":
+            sys.stdout.write("\nJob complete! Status: succeeded 🎉")
+            sys.stdout.write(
+                "\nTry out your fine-tuned model:\n\n"
+                "openai api completions.create -m {model} -p <YOUR_PROMPT>".format(
+                    model=resp["fine_tuned_model"]
+                )
+            )
+        elif status == "failed":
+            sys.stdout.write(
+                "\nJob failed. Please contact [email protected] if you need assistance."
+            )
+        sys.stdout.write("\n")
+
    @classmethod
    def cancel(cls, args):
        resp = openai.FineTune.cancel(id=args.id)
@@ -422,7 +428,16 @@ def help(args):

    # Completions
    sub = subparsers.add_parser("completions.create")
-    sub.add_argument("-e", "--engine", required=True, help="The engine to use")
+    sub.add_argument(
+        "-e",
+        "--engine",
+        help="The engine to use. See https://beta.openai.com/docs/engines for more about what engines are available.",
+    )
+    sub.add_argument(
+        "-m",
+        "--model",
+        help="The model to use. At most one of `engine` or `model` should be specified.",
+    )
    sub.add_argument(
        "--stream", help="Stream tokens as they're ready.", action="store_true"
    )
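Taken together, the fine-tune hunks move the end-of-job report out of create and into the streaming path, and the new success message points at completions.create -m. A rough sketch of the resulting workflow, again assuming the pre-1.0 client; the job id and prompt below are placeholders.

import sys

import openai

job_id = "<YOUR_FINE_TUNE_ID>"  # placeholder: the id echoed by "Created fine-tune: ..."

# Re-fetch the job once streaming ends, mirroring the block added to _stream_events.
resp = openai.FineTune.retrieve(id=job_id)
if resp["status"] == "succeeded":
    # The fine-tuned model id is what the CLI now suggests passing via -m.
    completion = openai.Completion.create(
        model=resp["fine_tuned_model"], prompt="<YOUR_PROMPT>", max_tokens=5
    )
    sys.stdout.write(completion["choices"][0]["text"] + "\n")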