Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion cmd/server_main.go
Original file line number Diff line number Diff line change
Expand Up @@ -176,6 +176,7 @@ func startServer(config *server.Config) {
searchService := service.NewSearchService()
fileService := service.NewFileService()
memoryService := service.NewMemoryService()
modelProviderService := service.NewModelProviderService()

// Initialize handler layer
authHandler := handler.NewAuthHandler()
Expand All @@ -193,7 +194,7 @@ func startServer(config *server.Config) {
searchHandler := handler.NewSearchHandler(searchService, userService)
fileHandler := handler.NewFileHandler(fileService, userService)
memoryHandler := handler.NewMemoryHandler(memoryService)
providerHandler := handler.NewProviderHandler(userService)
providerHandler := handler.NewProviderHandler(userService, modelProviderService)

// Initialize router
r := router.NewRouter(authHandler, userHandler, tenantHandler, documentHandler, datasetsHandler, systemHandler, kbHandler, chunkHandler, llmHandler, chatHandler, chatSessionHandler, connectorHandler, searchHandler, fileHandler, memoryHandler, providerHandler)
Expand Down
3 changes: 3 additions & 0 deletions conf/models/openai.json
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,9 @@
"name": "OpenAI",
"tags": "LLM,TEXT EMBEDDING,TTS,TEXT RE-RANK,SPEECH2TEXT,MODERATION",
"url": "https://api.openai.com/v1",
"url_suffix": {
"chat": "chat/completions"
},
"models": [
{
"name": "gpt-5.2-pro",
Expand Down
3 changes: 3 additions & 0 deletions conf/models/xai.json
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,9 @@
"name": "xAI",
"tags": "LLM",
"url": "https://api.x.ai/v1",
"url_suffix": {
"chat": "chat/completions"
},
"models": [
{
"name": "grok-4",
Expand Down
182 changes: 182 additions & 0 deletions conf/models/zhipu-ai.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,182 @@
{
"name": "ZHIPU-AI",
"tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
"url": "https://open.bigmodel.cn/api/paas/v4",
"url_suffix": {
"chat": "chat/completions",
"async_chat": "async/chat/completions",
"async_result": "async-result",
"embedding": "embedding",
"rerank": "rerank"
},
"models": [
{
"name": "glm-4.7",
"max_tokens": 128000,
"model_types": [
"chat"
],
"features": {}
},
{
"name": "glm-4.5",
"max_tokens": 128000,
"model_types": [
"chat"
],
"features": {}
},
{
"name": "glm-4.5-x",
"max_tokens": 128000,
"model_types": [
"chat"
],
"features": {}
},
{
"name": "glm-4.5-air",
"max_tokens": 128000,
"model_types": [
"chat"
],
"features": {}
},
{
"name": "glm-4.5-airx",
"max_tokens": 128000,
"model_types": [
"chat"
],
"features": {}
},
{
"name": "glm-4.5-flash",
"max_tokens": 128000,
"model_types": [
"chat"
],
"features": {}
},
{
"name": "glm-4.5v",
"max_tokens": 64000,
"model_types": [
"image2text"
],
"features": {}
},
{
"name": "glm-4-plus",
"max_tokens": 128000,
"model_types": [
"chat"
],
"features": {}
},
{
"name": "glm-4-0520",
"max_tokens": 128000,
"model_types": [
"chat"
],
"features": {}
},
{
"name": "glm-4",
"max_tokens": 128000,
"model_types": [
"chat"
],
"features": {}
},
{
"name": "glm-4-airx",
"max_tokens": 8000,
"model_types": [
"chat"
],
"features": {}
},
{
"name": "glm-4-air",
"max_tokens": 128000,
"model_types": [
"chat"
],
"features": {}
},
{
"name": "glm-4-flash",
"max_tokens": 128000,
"model_types": [
"chat"
],
"features": {}
},
{
"name": "glm-4-flashx",
"max_tokens": 128000,
"model_types": [
"chat"
],
"features": {}
},
{
"name": "glm-4-long",
"max_tokens": 1000000,
"model_types": [
"chat"
],
"features": {}
},
{
"name": "glm-3-turbo",
"max_tokens": 128000,
"model_types": [
"chat"
],
"features": {}
},
{
"name": "glm-4v",
"max_tokens": 2000,
"model_types": [
"image2text"
],
"features": {}
},
{
"name": "glm-4-9b",
"max_tokens": 8192,
"model_types": [
"chat"
],
"features": {}
},
{
"name": "embedding-2",
"max_tokens": 512,
"model_types": [
"embedding"
],
"features": {}
},
{
"name": "embedding-3",
"max_tokens": 512,
"model_types": [
"embedding"
],
"features": {}
},
{
"name": "glm-asr",
"max_tokens": 4096,
"model_types": [
"speech2text"
],
"features": {}
}
]
}
24 changes: 0 additions & 24 deletions internal/cli/admin_parser.go
Original file line number Diff line number Diff line change
Expand Up @@ -275,30 +275,6 @@ func (p *Parser) parseAdminListDefaultModels() (*Command, error) {
return NewCommand("list_user_default_models"), nil
}

// parseListModelsOfProvider parses a "LIST MODELS FROM '<provider>'" statement.
// On entry the current token is expected to be MODELS. It returns a
// "list_provider_models" command with the quoted provider name stored in
// Params["provider_name"], or an error if the token stream does not match.
func (p *Parser) parseListModelsOfProvider() (*Command, error) {
	if p.curToken.Type != TokenModels {
		return nil, fmt.Errorf("expected MODELS")
	}

	p.nextToken()
	if p.curToken.Type != TokenFrom {
		return nil, fmt.Errorf("expected FROM")
	}
	p.nextToken()
	providerName, err := p.parseQuotedString()
	if err != nil {
		return nil, err
	}
	cmd := NewCommand("list_provider_models")
	cmd.Params["provider_name"] = providerName
	p.nextToken()
	// The trailing semicolon is optional; consume it if present.
	// NOTE(review): the original comment said "optional for UNSET TOKEN",
	// which looks like a copy-paste from another parse function — this one
	// handles LIST MODELS.
	if p.curToken.Type == TokenSemicolon {
		p.nextToken()
	}
	return cmd, nil
}

func (p *Parser) parseCommonListProviders() (*Command, error) {
p.nextToken() // consume AVAILABLE

Expand Down
12 changes: 8 additions & 4 deletions internal/cli/cli.go
Original file line number Diff line number Diff line change
Expand Up @@ -332,7 +332,7 @@ func looksLikeSQL(s string) bool {
"LIST ", "SHOW ", "CREATE ", "DROP ", "ALTER ",
"LOGIN ", "REGISTER ", "PING", "GRANT ", "REVOKE ",
"SET ", "UNSET ", "UPDATE ", "DELETE ", "INSERT ",
"SELECT ", "DESCRIBE ", "EXPLAIN ",
"SELECT ", "DESCRIBE ", "EXPLAIN ", "ADD ", "ENABLE ", "DISABLE ", "CHAT ", "USE",
}
for _, prefix := range sqlPrefixes {
if strings.HasPrefix(s, prefix) {
Expand Down Expand Up @@ -1008,15 +1008,19 @@ Commands (User Mode):
LIST TOKENS; - List API tokens
LIST PROVIDERS; - List available LLM providers
CREATE TOKEN; - Create new API token
CREATE PROVIDER 'name'; - Create a provider without API key
CREATE PROVIDER 'name' 'api_key'; - Create a provider with API key
ADD PROVIDER 'name'; - Create a provider without API key
ADD PROVIDER 'name' 'api_key'; - Create a provider with API key
DROP TOKEN 'token_value'; - Delete an API token
DROP PROVIDER 'name'; - Delete a provider
DELETE PROVIDER 'name'; - Delete a provider
SET TOKEN 'token_value'; - Set and validate API token
SHOW TOKEN; - Show current API token
SHOW PROVIDER 'name'; - Show provider details
SHOW CURRENT MODEL; - Show current model settings
UNSET TOKEN; - Remove current API token
ALTER PROVIDER 'name' NAME 'new_name'; - Rename a provider
USE MODEL 'provider/instance/model'; - Set current model for chat
CHAT 'message'; - Chat using current model
CHAT 'provider/instance/model' 'message'; - Chat with specified model

Context Engine Commands (no quotes):
ls [path] - List resources
Expand Down
43 changes: 38 additions & 5 deletions internal/cli/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,13 +24,21 @@ import (
// PasswordPromptFunc is a function type for password input
type PasswordPromptFunc func(prompt string) (string, error)

// CurrentModel holds the current model configuration selected for chat.
// The three fields correspond to the 'provider/instance/model' triple used
// by the USE MODEL and CHAT commands.
type CurrentModel struct {
	Provider string // provider name
	Instance string // provider instance name
	Model    string // model name within the provider instance
}

// RAGFlowClient handles API interactions with the RAGFlow server.
// It bundles the HTTP transport, session-level settings, and the
// per-session state used by the CLI command handlers.
type RAGFlowClient struct {
	HTTPClient     *HTTPClient        // Underlying HTTP transport for API calls
	ServerType     string             // "admin" or "user"
	PasswordPrompt PasswordPromptFunc // Function for password input
	OutputFormat   OutputFormat       // Output format: table, plain, json
	ContextEngine  *ce.Engine         // Context Engine for virtual filesystem
	CurrentModel   *CurrentModel      // Current model configuration; presumably nil until set — confirm with UseModel
}

// NewRAGFlowClient creates a new RAGFlow client
Expand Down Expand Up @@ -158,6 +166,8 @@ func (c *RAGFlowClient) ExecuteAdminCommand(cmd *Command) (ResponseIf, error) {
return c.ShowProvider(cmd)
case "list_provider_models":
return c.ListModels(cmd)
case "list_instance_models":
return c.ListInstanceModels(cmd)
case "show_model":
return c.ShowModel(cmd)
// TODO: Implement other commands
Expand Down Expand Up @@ -203,21 +213,44 @@ func (c *RAGFlowClient) ExecuteUserCommand(cmd *Command) (ResponseIf, error) {
return c.CreateDocMetaIndex(cmd)
case "drop_doc_meta_index":
return c.DropDocMetaIndex(cmd)
case "list_pool_providers":
case "list_available_providers":
return c.ListAvailableProviders(cmd)
case "show_provider":
return c.ShowProvider(cmd)
case "list_provider_models":
return c.ListModels(cmd)
case "list_instance_models":
return c.ListInstanceModels(cmd)
case "show_model":
return c.ShowModel(cmd)
// Provider commands
case "create_provider":
return c.CreateProvider(cmd)
case "add_provider":
return c.AddProvider(cmd)
case "list_providers":
return c.ListProviders(cmd)
case "drop_provider":
return c.DropProvider(cmd)
case "delete_provider":
return c.DeleteProvider(cmd)
// Provider instance commands
case "create_provider_instance":
return c.CreateProviderInstance(cmd)
case "list_provider_instances":
return c.ListProviderInstances(cmd)
case "show_provider_instance":
return c.ShowProviderInstance(cmd)
case "alter_provider_instance":
return c.AlterProviderInstance(cmd)
case "drop_provider_instance":
return c.DropProviderInstance(cmd)
case "enable_model":
return c.EnableOrDisableModel(cmd, "enable")
case "disable_model":
return c.EnableOrDisableModel(cmd, "disable")
case "chat_to_model":
return c.ChatToModel(cmd)
case "use_model":
return c.UseModel(cmd)
case "show_current_model":
return c.ShowCurrentModel(cmd)
// ContextEngine commands
case "ce_ls":
return c.CEList(cmd)
Expand Down
Loading
Loading