This repository was archived by the owner on Nov 7, 2025. It is now read-only.

Commit ccf965a

trzysiek authored
Fix queries not showing in UI (#1374)
I'll write a short fix plus a test for it. A longer user story follows.

I haven't used `common_table` much yet, even though I recently found the feature very useful. I had no problems in my first few encounters with it, but this time I misconfigured something slightly, and what our debug interface shows in that scenario is not enough for me. The simplest way to explain is a user story:

1. I wanted to add a test case for a `DateTime64(9)` field in ClickHouse.
2. I started Quesma with this in the config:
   ```
   '*':
     useCommonTable: true
     target: [ my-clickhouse-data-source ]
   ```
3. Then I ran these steps in ClickHouse (all 3 of them):
   <img width="675" alt="Screenshot 2025-03-16 at 13 20 24" src="https://github.com/user-attachments/assets/54d26478-6e82-4d64-a12b-2317257339d2" />
4. Then I added a data view in Kibana for table `q` and saw no data. That is probably fine, but I think it's worth spending some time on making the user experience friendlier and giving the user hints in the most frequent cases, or at least a message to contact us for help.
   <img width="1728" alt="Screenshot 2025-03-16 at 17 18 49" src="https://github.com/user-attachments/assets/17c742da-b9b2-495f-8308-3a6a0227b6be" />
5. What really convinced me I needed to tackle this is our Debug UI: nothing, no traffic, even though requests are flowing and Quesma returns (empty, but still) responses:
   <img width="615" alt="Screenshot 2025-03-16 at 17 21 32" src="https://github.com/user-attachments/assets/1d0fca92-6ec3-40a9-b85b-8047b3dbfeec" />
   <img width="1728" alt="Screenshot 2025-03-16 at 17 21 26" src="https://github.com/user-attachments/assets/b618b4a2-59e1-4b6b-89ee-b00914063e39" />

After the fix, queries show up in the UI:

![Screenshot 2025-03-21 at 13 35 33](https://github.com/user-attachments/assets/3ef9bf24-a069-4ced-bd95-3b48729780a3)

---------

Co-authored-by: trzysiek <[email protected]>
1 parent fedef4a commit ccf965a

File tree

1 file changed: +64 −45 lines


platform/frontend_connectors/search.go

Lines changed: 64 additions & 45 deletions
@@ -454,39 +454,62 @@ func (q *QueryRunner) executePlan(ctx context.Context, plan *model.ExecutionPlan
 	}
 }
 
-func (q *QueryRunner) handleSearchCommon(ctx context.Context, indexPattern string, body types.JSON, optAsync *AsyncQuery) ([]byte, error) {
+func (q *QueryRunner) handleSearchCommon(ctx context.Context, indexPattern string, body types.JSON, optAsync *AsyncQuery) (resp []byte, err error) {
+	var (
+		id                  = "FAKE_ID"
+		path                = ""
+		startTime           = time.Now()
+		resolvedIndexes     []string
+		queryTranslator     IQueryTranslator
+		plan                *model.ExecutionPlan
+		clickhouseConnector *quesma_api.ConnectorDecisionClickhouse
+		table               *clickhouse.Table // TODO we should use schema here only
+		tables              clickhouse.TableMap
+		currentSchema       schema.Schema
+	)
+
+	if val := ctx.Value(tracing.RequestIdCtxKey); val != nil {
+		if str, ok := val.(string); ok {
+			id = str
+		}
+	}
+	if val := ctx.Value(tracing.RequestPath); val != nil {
+		if str, ok := val.(string); ok {
+			path = str
+		}
+	}
 
 	decision := q.tableResolver.Resolve(quesma_api.QueryPipeline, indexPattern)
 
 	if decision.Err != nil {
-
-		var resp []byte
 		if optAsync != nil {
 			resp, _ = elastic_query_dsl.EmptyAsyncSearchResponse(optAsync.asyncId, false, 200)
 		} else {
 			resp = elastic_query_dsl.EmptySearchResponse(ctx)
 		}
-		return resp, decision.Err
+		err = decision.Err
+		goto logErrorAndReturn
 	}
 
 	if decision.IsEmpty {
 		if optAsync != nil {
-			return elastic_query_dsl.EmptyAsyncSearchResponse(optAsync.asyncId, false, 200)
+			resp, err = elastic_query_dsl.EmptyAsyncSearchResponse(optAsync.asyncId, false, 200)
 		} else {
-			return elastic_query_dsl.EmptySearchResponse(ctx), nil
+			resp = elastic_query_dsl.EmptySearchResponse(ctx)
 		}
+		goto logErrorAndReturn
 	}
 
 	if decision.IsClosed {
-		return nil, quesma_errors.ErrIndexNotExists() // TODO
+		err = quesma_errors.ErrIndexNotExists() // TODO
+		goto logErrorAndReturn
 	}
 
 	if len(decision.UseConnectors) == 0 {
-		return nil, end_user_errors.ErrSearchCondition.New(fmt.Errorf("no connectors to use"))
+		err = end_user_errors.ErrSearchCondition.New(fmt.Errorf("no connectors to use"))
+		goto logErrorAndReturn
 	}
 
-	var clickhouseConnector *quesma_api.ConnectorDecisionClickhouse
-
 	for _, connector := range decision.UseConnectors {
 		switch c := connector.(type) {
 
@@ -499,53 +522,43 @@ func (q *QueryRunner) handleSearchCommon(ctx context.Context, indexPattern strin
 		// This code lives only to postpone bigger refactor of `handleSearchCommon` which also supports async and A/B testing
 
 		default:
-			return nil, fmt.Errorf("unknown connector type: %T", c)
+			err = fmt.Errorf("unknown connector type: %T", c)
+			goto logErrorAndReturn
 		}
 	}
 
 	if clickhouseConnector == nil {
 		logger.Warn().Msgf("multi-search payload contains Elasticsearch-targetted query")
-		return nil, fmt.Errorf("quesma-processed _msearch payload contains Elasticsearch-targetted query")
+		err = fmt.Errorf("quesma-processed _msearch payload contains Elasticsearch-targetted query")
+		goto logErrorAndReturn
 	}
 
-	var responseBody []byte
-
-	startTime := time.Now()
-	id := "FAKE_ID"
-	if val := ctx.Value(tracing.RequestIdCtxKey); val != nil {
-		id = val.(string)
-	}
-	path := ""
-	if value := ctx.Value(tracing.RequestPath); value != nil {
-		if str, ok := value.(string); ok {
-			path = str
-		}
-	}
-
-	tables, err := q.logManager.GetTableDefinitions()
+	startTime = time.Now()
+	tables, err = q.logManager.GetTableDefinitions()
 	if err != nil {
-		return nil, err
+		goto logErrorAndReturn
 	}
 
-	var table *clickhouse.Table // TODO we should use schema here only
-	var currentSchema schema.Schema
-	resolvedIndexes := clickhouseConnector.ClickhouseIndexes
+	resolvedIndexes = clickhouseConnector.ClickhouseIndexes
 
 	if !clickhouseConnector.IsCommonTable {
 		if len(resolvedIndexes) < 1 {
-			return []byte{}, end_user_errors.ErrNoSuchTable.New(fmt.Errorf("can't load [%s] schema", resolvedIndexes)).Details("Table: [%v]", resolvedIndexes)
+			resp, err = []byte{}, end_user_errors.ErrNoSuchTable.New(fmt.Errorf("can't load [%s] schema", resolvedIndexes)).Details("Table: [%v]", resolvedIndexes)
+			goto logErrorAndReturn
 		}
 		indexName := resolvedIndexes[0] // we got exactly one table here because of the check above
 		resolvedTableName := q.cfg.IndexConfig[indexName].TableName(indexName)
 
 		resolvedSchema, ok := q.schemaRegistry.FindSchema(schema.IndexName(indexName))
 		if !ok {
-			return []byte{}, end_user_errors.ErrNoSuchTable.New(fmt.Errorf("can't load %s schema", resolvedTableName)).Details("Table: %s", resolvedTableName)
+			resp, err = []byte{}, end_user_errors.ErrNoSuchTable.New(fmt.Errorf("can't load %s schema", resolvedTableName)).Details("Table: %s", resolvedTableName)
+			goto logErrorAndReturn
 		}
 
 		table, _ = tables.Load(resolvedTableName)
 		if table == nil {
-			return []byte{}, end_user_errors.ErrNoSuchTable.New(fmt.Errorf("can't load %s table", resolvedTableName)).Details("Table: %s", resolvedTableName)
+			resp, err = []byte{}, end_user_errors.ErrNoSuchTable.New(fmt.Errorf("can't load %s table", resolvedTableName)).Details("Table: %s", resolvedTableName)
+			goto logErrorAndReturn
 		}
 
 		currentSchema = resolvedSchema
@@ -567,15 +580,17 @@ func (q *QueryRunner) handleSearchCommon(ctx context.Context, indexPattern strin
 
 		if len(resolvedIndexes) == 0 {
 			if optAsync != nil {
-				return elastic_query_dsl.EmptyAsyncSearchResponse(optAsync.asyncId, false, 200)
+				resp, err = elastic_query_dsl.EmptyAsyncSearchResponse(optAsync.asyncId, false, 200)
 			} else {
-				return elastic_query_dsl.EmptySearchResponse(ctx), nil
+				resp, err = elastic_query_dsl.EmptySearchResponse(ctx), nil
 			}
+			goto logErrorAndReturn
 		}
 
 		commonTable, ok := tables.Load(common_table.TableName)
 		if !ok {
-			return []byte{}, end_user_errors.ErrNoSuchTable.New(fmt.Errorf("can't load %s table", common_table.TableName)).Details("Table: %s", common_table.TableName)
+			resp, err = []byte{}, end_user_errors.ErrNoSuchTable.New(fmt.Errorf("can't load %s table", common_table.TableName)).Details("Table: %s", common_table.TableName)
+			goto logErrorAndReturn
 		}
 
 		// Let's build a union of schemas
@@ -591,7 +606,8 @@ func (q *QueryRunner) handleSearchCommon(ctx context.Context, indexPattern strin
 		for _, idx := range resolvedIndexes {
 			scm, ok := schemas[schema.IndexName(idx)]
 			if !ok {
-				return []byte{}, end_user_errors.ErrNoSuchTable.New(fmt.Errorf("can't load %s schema", idx)).Details("Table: %s", idx)
+				resp, err = []byte{}, end_user_errors.ErrNoSuchTable.New(fmt.Errorf("can't load %s schema", idx)).Details("Table: %s", idx)
+				goto logErrorAndReturn
 			}
 
 			for fieldName := range scm.Fields {
@@ -604,9 +620,9 @@ func (q *QueryRunner) handleSearchCommon(ctx context.Context, indexPattern strin
 		table = commonTable
 	}
 
-	queryTranslator := NewQueryTranslator(ctx, currentSchema, table, q.logManager, q.DateMathRenderer, resolvedIndexes)
+	queryTranslator = NewQueryTranslator(ctx, currentSchema, table, q.logManager, q.DateMathRenderer, resolvedIndexes)
 
-	plan, err := queryTranslator.ParseQuery(body)
+	plan, err = queryTranslator.ParseQuery(body)
 
 	if err != nil {
 		logger.ErrorWithCtx(ctx).Msgf("parsing error: %v", err)
@@ -617,15 +633,14 @@ func (q *QueryRunner) handleSearchCommon(ctx context.Context, indexPattern strin
 			queriesBody[i].Query = []byte(query.SelectCommand.String())
 			queriesBodyConcat += query.SelectCommand.String() + "\n"
 		}
-		responseBody = []byte(fmt.Sprintf("Invalid Queries: %v, err: %v", queriesBody, err))
+		resp = []byte(fmt.Sprintf("Invalid Queries: %v, err: %v", queriesBody, err))
+		err = errors.New(string(resp))
 		logger.ErrorWithCtxAndReason(ctx, "Quesma generated invalid SQL query").Msg(queriesBodyConcat)
-		bodyAsBytes, _ := body.Bytes()
-		pushSecondaryInfo(q.debugInfoCollector, id, "", path, bodyAsBytes, queriesBody, responseBody, startTime)
-		return responseBody, errors.New(string(responseBody))
+		goto logErrorAndReturn
 	}
 	err = q.transformQueries(plan)
 	if err != nil {
-		return responseBody, err
+		goto logErrorAndReturn
 	}
 	plan.IndexPattern = indexPattern
 	plan.StartTime = startTime
@@ -636,6 +651,10 @@ func (q *QueryRunner) handleSearchCommon(ctx context.Context, indexPattern strin
 	}
 	return q.executePlan(ctx, plan, queryTranslator, table, body, optAsync, nil, true)
 
+logErrorAndReturn:
+	bodyAsBytes, _ := body.Bytes()
+	pushSecondaryInfo(q.debugInfoCollector, id, "", path, bodyAsBytes, []diag.TranslatedSQLQuery{}, resp, startTime)
+	return resp, err
 }
 
 func (q *QueryRunner) storeAsyncSearch(qmc diag.DebugInfoCollector, id, asyncId string,
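
The change routes every early exit through a single `logErrorAndReturn` label, so `pushSecondaryInfo` records the request in the debug collector even when index resolution fails. That is also why the locals are hoisted into one `var (...)` block at the top: Go's `goto` may not jump over new variable declarations. Below is a minimal, self-contained sketch of that pattern under assumptions; `handle` and `debugLog` are hypothetical stand-ins, not functions from the Quesma codebase.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// debugLog stands in for pushSecondaryInfo: it records every request,
// including the ones that fail early, so a debug view is never empty.
func debugLog(id string, resp []byte, err error, start time.Time) {
	fmt.Printf("id=%s resp=%q err=%v took=%s\n", id, resp, err, time.Since(start))
}

// handle mirrors the shape of the refactored handleSearchCommon: named
// results, locals declared up front (goto cannot skip over declarations),
// and a single label that logs before returning.
func handle(id string, fail bool) (resp []byte, err error) {
	var (
		startTime = time.Now()
		payload   []byte
	)

	if fail {
		err = errors.New("table resolver returned an error")
		goto logAndReturn // early exit still reaches the logging code below
	}

	payload = []byte(`{"hits": []}`)
	resp = payload

logAndReturn:
	debugLog(id, resp, err, startTime)
	return resp, err
}

func main() {
	_, _ = handle("req-1", false)
	_, _ = handle("req-2", true) // the error path is logged too
}
```

Running the sketch prints a log line for both the success and the failure path, which mirrors how the Debug UI now shows traffic for misconfigured indexes instead of staying empty.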

Comments (0)