Commit a218390

fix(cli): max_tokens is deprecated in favor of max_completion_tokens (#1151)

Signed-off-by: Radek Ježek <radek.jezek@ibm.com>
1 parent: c62554d

2 files changed: 39 additions & 33 deletions

File tree

  • apps
    • beeai-cli/src/beeai_cli/commands/model.py
    • beeai-server/src/beeai_server/api/routes/openai.py

apps/beeai-cli/src/beeai_cli/commands/model.py
Lines changed: 38 additions & 33 deletions
@@ -348,7 +348,7 @@ async def _select_default_model(capability: ModelCapability) -> str | None:
     if capability == ModelCapability.LLM:
         test_response = await client.chat.completions.create(
             model=selected_model,
-            max_tokens=500,  # reasoning models need some tokens to think about this
+            max_completion_tokens=500,  # reasoning models need some tokens to think about this
             messages=[
                 {
                     "role": "system",
@@ -522,14 +522,18 @@ async def add_provider(
     async with configuration.use_platform_client():
         await _add_provider(capability)

-    conf = await SystemConfiguration.get()
-    default_model = conf.default_llm_model if capability == ModelCapability.LLM else conf.default_embedding_model
-    if not default_model:
-        default_model = await _select_default_model(capability)
-    default_llm = default_model if capability == ModelCapability.LLM else conf.default_llm_model
-    default_embedding = default_model if capability == ModelCapability.EMBEDDING else conf.default_embedding_model
-    with console.status("Saving configuration...", spinner="dots"):
-        await SystemConfiguration.update(default_llm_model=default_llm, default_embedding_model=default_embedding)
+        conf = await SystemConfiguration.get()
+        default_model = conf.default_llm_model if capability == ModelCapability.LLM else conf.default_embedding_model
+        if not default_model:
+            default_model = await _select_default_model(capability)
+        default_llm = default_model if capability == ModelCapability.LLM else conf.default_llm_model
+        default_embedding = (
+            default_model if capability == ModelCapability.EMBEDDING else conf.default_embedding_model
+        )
+        with console.status("Saving configuration...", spinner="dots"):
+            await SystemConfiguration.update(
+                default_llm_model=default_llm, default_embedding_model=default_embedding
+            )


 def _select_provider(providers: list[ModelProvider], search_path: str) -> ModelProvider:
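This hunk, like the remove_provider one below, moves the SystemConfiguration
reads and writes inside configuration.use_platform_client(), so they run
while a platform client is active. The commit does not show that helper; a
hypothetical sketch of the usual shape of such a context manager (a
contextvar-scoped HTTP client; every name and the URL here are illustrative,
not from the repository):

    from contextlib import asynccontextmanager
    from contextvars import ContextVar

    import httpx

    # Hypothetical: an ambient client that model helpers such as
    # SystemConfiguration.get()/update() can pick up while the block is open.
    _client_var: ContextVar[httpx.AsyncClient | None] = ContextVar("platform_client", default=None)

    @asynccontextmanager
    async def use_platform_client(base_url: str = "http://localhost:8333"):
        async with httpx.AsyncClient(base_url=base_url) as client:
            token = _client_var.set(client)
            try:
                yield client
            finally:
                _client_var.reset(token)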
@@ -551,35 +555,36 @@ async def remove_provider(
         str | None, typer.Argument(..., help="Provider type or part of the provider base url")
     ] = None,
 ):
-    conf = await SystemConfiguration.get()
-
     async with configuration.use_platform_client():
-        providers = await ModelProvider.list()
+        conf = await SystemConfiguration.get()

-        if not search_path:
-            provider: ModelProvider = await inquirer.select(  # type: ignore
-                message="Choose a provider to remove:",
-                choices=[Choice(name=f"{p.type} ({p.base_url})", value=p) for p in providers],
-            ).execute_async()
-        else:
-            provider = _select_provider(providers, search_path)
+        async with configuration.use_platform_client():
+            providers = await ModelProvider.list()

-        await provider.delete()
+            if not search_path:
+                provider: ModelProvider = await inquirer.select(  # type: ignore
+                    message="Choose a provider to remove:",
+                    choices=[Choice(name=f"{p.type} ({p.base_url})", value=p) for p in providers],
+                ).execute_async()
+            else:
+                provider = _select_provider(providers, search_path)

-        default_llm = None if (conf.default_llm_model or "").startswith(provider.type) else conf.default_llm_model
-        default_embed = (
-            None if (conf.default_embedding_model or "").startswith(provider.type) else conf.default_embedding_model
-        )
+            await provider.delete()

-        try:
-            if (conf.default_llm_model or "").startswith(provider.type):
-                console.print("The provider was used as default llm model. Please select another one...")
-                default_llm = await _select_default_model(ModelCapability.LLM)
-            if (conf.default_embedding_model or "").startswith(provider.type):
-                console.print("The provider was used as default embedding model. Please select another one...")
-                default_embed = await _select_default_model(ModelCapability.EMBEDDING)
-        finally:
-            await SystemConfiguration.update(default_llm_model=default_llm, default_embedding_model=default_embed)
+            default_llm = None if (conf.default_llm_model or "").startswith(provider.type) else conf.default_llm_model
+            default_embed = (
+                None if (conf.default_embedding_model or "").startswith(provider.type) else conf.default_embedding_model
+            )
+
+            try:
+                if (conf.default_llm_model or "").startswith(provider.type):
+                    console.print("The provider was used as default llm model. Please select another one...")
+                    default_llm = await _select_default_model(ModelCapability.LLM)
+                if (conf.default_embedding_model or "").startswith(provider.type):
+                    console.print("The provider was used as default embedding model. Please select another one...")
+                    default_embed = await _select_default_model(ModelCapability.EMBEDDING)
+            finally:
+                await SystemConfiguration.update(default_llm_model=default_llm, default_embedding_model=default_embed)

     await list_model_providers()
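The startswith checks above clear a default model whenever its provider is
removed, then prompt for a replacement. A self-contained illustration of that
reset rule, assuming default model ids are prefixed with the provider type
(the example values are made up):

    def reset_default(default_model: str | None, removed_provider_type: str) -> str | None:
        # Drop the default if it belonged to the removed provider, keep it otherwise.
        return None if (default_model or "").startswith(removed_provider_type) else default_model

    assert reset_default("openai:gpt-4o", "openai") is None
    assert reset_default("ollama:granite3.3", "openai") == "ollama:granite3.3"
    assert reset_default(None, "openai") is None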

apps/beeai-server/src/beeai_server/api/routes/openai.py
Lines changed: 1 addition & 0 deletions
@@ -58,6 +58,7 @@ async def create_chat_completion(
         response_format=request.response_format,
         temperature=request.temperature,
         max_tokens=request.max_tokens,
+        max_completion_tokens=request.max_completion_tokens,
         top_p=request.top_p,
         n=request.n,
         logit_bias=request.logit_bias,
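The server route keeps forwarding the legacy max_tokens while adding
max_completion_tokens, so older clients keep working and newer ones can send
the replacement. A minimal sketch of a client call against such an
OpenAI-compatible endpoint; the base_url, api_key, and model id are
illustrative, not taken from the commit:

    from openai import OpenAI

    client = OpenAI(base_url="http://localhost:8333/api/v1/openai", api_key="dummy")
    response = client.chat.completions.create(
        model="openai:gpt-4o",
        max_completion_tokens=256,  # forwarded by create_chat_completion above
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.choices[0].message.content)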
