From 3edb205df344c060011dee1e2b77a5880ed4682b Mon Sep 17 00:00:00 2001 From: Shaik-Sirajuddin Date: Wed, 25 Feb 2026 13:54:52 +0530 Subject: [PATCH 01/24] wip:influxb tools --- tools/influx_db.go | 197 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 197 insertions(+) create mode 100644 tools/influx_db.go diff --git a/tools/influx_db.go b/tools/influx_db.go new file mode 100644 index 00000000..8770162a --- /dev/null +++ b/tools/influx_db.go @@ -0,0 +1,197 @@ +package tools + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + "time" + + mcpgrafana "github.com/grafana/mcp-grafana" + "github.com/mark3labs/mcp-go/mcp" +) + +const ( + InfluxDBDataSourceType = "influxdb" + + InfluxResMaxDataPoints = 100 +) + +const ( + FluxQueryType = "Flux" + SQLQueryType = "SQL" + InfluxQLQueryType = "InfluxQL" +) + +type influxDBClient struct { + httpClient *http.Client + baseURL string +} + +// newInfluxDBClient creates a new InfluxDB client for the given datasource +func newInfluxDBClient(ctx context.Context, uid string, queryType *string) (*influxDBClient, error) { + // Verify the datasource exists and is a InfluxDB datasource + ds, err := getDatasourceByUID(ctx, GetDatasourceByUIDParams{UID: uid}) + if err != nil { + return nil, err + } + + if ds.Type != InfluxDBDataSourceType { + return nil, fmt.Errorf("datasource %s is of type %s, not %s", uid, ds.Type, InfluxDBDataSourceType) + } + + if queryType != nil { + //verify the query lang specified is the one confgured with datasource + dsQueryType := InfluxQLQueryType + + if jsonMap, ok := ds.JSONData.(map[string]interface{}); ok { + if dsQT, ok := jsonMap["version"].(string); ok && dsQT != "" { + dsQueryType = dsQT + } + } + + if *queryType != dsQueryType { + return nil, fmt.Errorf("datasource %s is configured with querytype %s, not %s", uid, dsQueryType, *queryType) + } + } + + cfg := mcpgrafana.GrafanaConfigFromContext(ctx) + baseURL := strings.TrimRight(cfg.URL, "/") + + // Create custom transport 
with TLS configuration if available + var transport = http.DefaultTransport + if tlsConfig := cfg.TLSConfig; tlsConfig != nil { + var err error + transport, err = tlsConfig.HTTPTransport(transport.(*http.Transport)) + if err != nil { + return nil, fmt.Errorf("failed to create custom transport: %w", err) + } + } + + transport = NewAuthRoundTripper(transport, cfg.AccessToken, cfg.IDToken, cfg.APIKey, cfg.BasicAuth) + transport = mcpgrafana.NewOrgIDRoundTripper(transport, cfg.OrgID) + + client := &http.Client{ + Transport: mcpgrafana.NewUserAgentTransport(transport), + } + + return &influxDBClient{ + httpClient: client, + baseURL: baseURL, + }, nil +} + +type InfluxQueryArgs struct { + DatasourceUID string `json:"datasourceUid" jsonschema:"required,description=The UID of the InfluxDB datasource to query. Use list_datasources to find available UIDs."` + Query string `json:"query" jsonschema:"required,description=SQL/Flux/InfluxQL query. Supports SQL macros: $__timeFilter for time filtering\\, $__timeFrom/$__timeTo for millisecond timestamps\\, $__interval for calculated intervals\\, $__dateBin()/$__dateBinAlias() to apply date_bin for timestamp columns. Supports Flux macros : v.timeRangeStart\\, v.timeRangeStop\\, v.windowPeriod (Grafana-calculated interval)\\, v.defaultBucket (configured default bucket)\\, v.organization (configured organization)\\."` + QueryType string `json:"query_type"` //TODO : enum options + Start string `json:"start,omitempty" jsonschema:"description=Start time. Formats: 'now-1h'\\, '2026-02-02T19:00:00Z'\\, '1738519200000' (Unix ms). Default: now-1h"` + End string `json:"end,omitempty" jsonschema:"description=End time. Formats: 'now'\\, '2026-02-02T20:00:00Z'\\, '1738522800000' (Unix ms). 
Default: now"` + IntervalMs uint + Limit uint `json:"limit"` +} + +// influxQueryResponse represents the raw API response from Grafana's /api/ds/query +type influxQueryResponse struct { + Results map[string]struct { + Status int `json:"status,omitempty"` + Frames []struct { + Schema struct { + Name string `json:"name,omitempty"` + RefID string `json:"refId,omitempty"` + Fields []struct { + Name string `json:"name"` + Type string `json:"type"` + TypeInfo struct { + Frame string `json:"frame,omitempty"` + } `json:"typeInfo,omitempty"` + } `json:"fields"` + } `json:"schema"` + Data struct { + Values [][]interface{} `json:"values"` + } `json:"data"` + } `json:"frames,omitempty"` + Error string `json:"error,omitempty"` + } `json:"results"` +} + +func (ic *influxDBClient) Query(ctx context.Context, args InfluxQueryArgs, from, to time.Time) (influxQueryResponse, error) { + payload := map[string]interface{}{ + "queries": []map[string]interface{}{ + { + "datasource": map[string]string{ + "uid": args.DatasourceUID, + "type": InfluxDBDataSourceType, + }, + "refId": "A", + "type": "timeSeriesQuery", + "intervalMs": args.IntervalMs, + "maxDataPoints": args.Limit, + }, + }, + "from": strconv.FormatInt(from.UnixMilli(), 10), + "to": strconv.FormatInt(to.UnixMilli(), 10), + } + + // ic.httpClient.Post(ic.baseURL + "/api/ds/query" , map[string]any{ + // "queries" : map[string]any{ + // "refId" : "A", + // "datasource" : { + // "uid" : args.DatasourceUID, + // } + // } + // }) +} + +func enforceQueryLimit(args *InfluxQueryArgs) { + if args.Limit > InfluxResMaxDataPoints { + args.Limit = InfluxResMaxDataPoints + } +} + +func queryInflux(ctx context.Context, args InfluxQueryArgs) (*influxQueryResponse, error) { + client, err := newInfluxDBClient(ctx, args.DatasourceUID, &args.QueryType) + + if err != nil { + return nil, err + } + + //todo : enforce time range limits + + enforceQueryLimit(&args) + + res, err := client.Query(ctx, args) + if err != nil { + return nil, err + } + + return 
&res, nil +} + +var QueryInflux = mcpgrafana.MustTool( + "list_loki_label_values", + "Retrieves all unique values associated with a specific `labelName` within a Loki datasource and time range. Returns a list of string values (e.g., for `labelName=\"env\"`, might return `[\"prod\", \"staging\", \"dev\"]`). Useful for discovering filter options. Defaults to the last hour if the time range is omitted.", + queryInflux, + mcp.WithTitleAnnotation("List Loki label values"), + mcp.WithIdempotentHintAnnotation(true), + mcp.WithReadOnlyHintAnnotation(true), +) + +//Query method +//build query and execute +//struct for client -> + +/** + Client Query method + + Request Args (tool params) , Response , + + tool specification (object) + -toolname + -toolhandler + + add tools , + list tools method , + **/ +//create client and reuse From 9e3d11f2bd638de0233a385ae504fd7b0656c79a Mon Sep 17 00:00:00 2001 From: Shaik-Sirajuddin Date: Sat, 28 Feb 2026 14:33:48 +0530 Subject: [PATCH 02/24] feat(influx): add metadata tools(buckets,measurements,tags,keys) --- cmd/mcp-grafana/main.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cmd/mcp-grafana/main.go b/cmd/mcp-grafana/main.go index 1efdb9bc..6a730c50 100644 --- a/cmd/mcp-grafana/main.go +++ b/cmd/mcp-grafana/main.go @@ -45,7 +45,7 @@ type disabledTools struct { dashboard, folder, oncall, asserts, sift, admin, pyroscope, navigation, proxied, annotations, rendering, cloudwatch, write, examples, clickhouse, searchlogs, - runpanelquery bool + runpanelquery, influxdb bool } // Configuration for the Grafana client. @@ -61,7 +61,7 @@ type grafanaConfig struct { } func (dt *disabledTools) addFlags() { - flag.StringVar(&dt.enabledTools, "enabled-tools", "search,datasource,incident,prometheus,loki,alerting,dashboard,folder,oncall,asserts,sift,pyroscope,navigation,proxied,annotations,rendering", "A comma separated list of tools enabled for this server. Can be overwritten entirely or by disabling specific components, e.g. 
--disable-search.") + flag.StringVar(&dt.enabledTools, "enabled-tools", "search,datasource,incident,prometheus,loki,alerting,dashboard,folder,oncall,asserts,sift,pyroscope,navigation,proxied,annotations,rendering,influxdb", "A comma separated list of tools enabled for this server. Can be overwritten entirely or by disabling specific components, e.g. --disable-search.") flag.BoolVar(&dt.search, "disable-search", false, "Disable search tools") flag.BoolVar(&dt.datasource, "disable-datasource", false, "Disable datasource tools") flag.BoolVar(&dt.incident, "disable-incident", false, "Disable incident tools") @@ -86,6 +86,7 @@ func (dt *disabledTools) addFlags() { flag.BoolVar(&dt.clickhouse, "disable-clickhouse", false, "Disable ClickHouse tools") flag.BoolVar(&dt.searchlogs, "disable-searchlogs", false, "Disable search logs tools") flag.BoolVar(&dt.runpanelquery, "disable-runpanelquery", false, "Disable run panel query tools") + flag.BoolVar(&dt.influxdb, "disable-influxdb", false, "Disable InfluxDb tools") } func (gc *grafanaConfig) addFlags() { @@ -123,6 +124,7 @@ func (dt *disabledTools) addTools(s *server.MCPServer) { maybeAddTools(s, tools.AddClickHouseTools, enabledTools, dt.clickhouse, "clickhouse") maybeAddTools(s, tools.AddSearchLogsTools, enabledTools, dt.searchlogs, "searchlogs") maybeAddTools(s, tools.AddRunPanelQueryTools, enabledTools, dt.runpanelquery, "runpanelquery") + maybeAddTools(s, tools.AddInfluxTools, enabledTools, dt.influxdb, "influxdb") } func newServer(transport string, dt disabledTools, obs *observability.Observability) (*server.MCPServer, *mcpgrafana.ToolManager) { From e4eb162f624499de5945cbb855e586a38ba74672 Mon Sep 17 00:00:00 2001 From: Shaik-Sirajuddin Date: Sun, 1 Mar 2026 12:32:28 +0530 Subject: [PATCH 03/24] chore(docker): add influxdb2,3 for integration tests --- docker-compose.yaml | 40 +++++++++ testdata/localstack-init.sh | 1 - .../provisioning/datasources/datasources.yaml | 37 +++++++++ 
.../influxdb/.env.influxdb2-admin-password | 1 + .../tools/influxdb/.env.influxdb2-admin-token | 1 + .../influxdb/.env.influxdb2-admin-username | 1 + testdata/tools/influxdb/admin-token.json | 4 + testdata/tools/influxdb/influxdb-2-seed.sh | 83 +++++++++++++++++++ testdata/tools/influxdb/influxdb3-seed.sh | 27 ++++++ testdata/tools/influxdb/user | 0 10 files changed, 194 insertions(+), 1 deletion(-) create mode 100644 testdata/tools/influxdb/.env.influxdb2-admin-password create mode 100644 testdata/tools/influxdb/.env.influxdb2-admin-token create mode 100644 testdata/tools/influxdb/.env.influxdb2-admin-username create mode 100644 testdata/tools/influxdb/admin-token.json create mode 100755 testdata/tools/influxdb/influxdb-2-seed.sh create mode 100644 testdata/tools/influxdb/influxdb3-seed.sh create mode 100644 testdata/tools/influxdb/user diff --git a/docker-compose.yaml b/docker-compose.yaml index 19dae584..8bab2adf 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -132,3 +132,43 @@ services: interval: 10s timeout: 5s retries: 5 + influxdb3: + build: + dockerfile_inline: | + FROM influxdb:3-core@sha256:255268d2a5f42b8c38d373864a4ba72956d91e14a3361019706bfad2f7c039ab + COPY --chmod=777 ./testdata/tools/influxdb/influxdb3-seed.sh /init.sh + ports: + - "8181:8181" + command: > + /bin/bash -c "/init.sh & influxdb3 serve --node-id=node0 --object-store=file --data-dir=/var/lib/influxdb3 --admin-token-file=/run/secrets/admin-token" + secrets: + - admin-token + influxdb2: + image: influxdb:2 + restart: unless-stopped + ports: + - "8086:8086" + volumes: + - ./tools/influxdb/influxdb-2-seed.sh:/docker-entrypoint-initdb.d/init.sh + environment: + DOCKER_INFLUXDB_INIT_MODE: setup + DOCKER_INFLUXDB_INIT_USERNAME_FILE: /run/secrets/influxdb2-admin-username + DOCKER_INFLUXDB_INIT_PASSWORD_FILE: /run/secrets/influxdb2-admin-password + DOCKER_INFLUXDB_INIT_ADMIN_TOKEN_FILE: /run/secrets/influxdb2-admin-token + DOCKER_INFLUXDB_INIT_ORG: system-logs + 
DOCKER_INFLUXDB_INIT_BUCKET: b-system-logs + secrets: + - influxdb2-admin-username + - influxdb2-admin-password + - influxdb2-admin-token + +secrets: + influxdb2-admin-username: + file: ./testdata/tools/influxdb/.env.influxdb2-admin-username + influxdb2-admin-password: + file: ./testdata/tools/influxdb/.env.influxdb2-admin-password + influxdb2-admin-token: + file: ./testdata/tools/influxdb/.env.influxdb2-admin-token + admin-token: + file: ./testdata/tools/influxdb/admin-token.json + \ No newline at end of file diff --git a/testdata/localstack-init.sh b/testdata/localstack-init.sh index ad88f32f..663dd694 100755 --- a/testdata/localstack-init.sh +++ b/testdata/localstack-init.sh @@ -1,4 +1,3 @@ -#!/bin/bash set -e echo "Seeding CloudWatch test data..." diff --git a/testdata/provisioning/datasources/datasources.yaml b/testdata/provisioning/datasources/datasources.yaml index fd714408..84eccfa4 100644 --- a/testdata/provisioning/datasources/datasources.yaml +++ b/testdata/provisioning/datasources/datasources.yaml @@ -87,3 +87,40 @@ datasources: accessKey: test secretKey: test isDefault: false + - name: InfluxDB_v2_Flux + id: 9 + uid: influxdb-flux + type: influxdb + access: proxy + url: http://influxdb2:8086 + jsonData: + version: Flux + organization: system-logs + defaultBucket: b-system-logs + tlsSkipVerify: true + secureJsonData: + token: admintoken + - name: InfluxDB_v2_InfluxQL + id: 10 + uid: influxdb-influxql + type: influxdb + access: proxy + url: http://influxdb2:8086 + jsonData: + dbName: b-system-logs + httpHeaderName1: 'Authorization' + secureJsonData: + httpHeaderValue1: 'Token admintoken' + - name: InfluxDB_v3_SQL + id: 11 + uid: influxdb-sql + type: influxdb + access: proxy + url: http://influxdb3:8181 + jsonData: + version: SQL + dbName: system-logs + httpMode: POST + insecureGrpc: true + secureJsonData: + token: 'apiv3_OgXAgbMRgiGXcAQaFLJoaw==' \ No newline at end of file diff --git a/testdata/tools/influxdb/.env.influxdb2-admin-password 
b/testdata/tools/influxdb/.env.influxdb2-admin-password new file mode 100644 index 00000000..7aa311ad --- /dev/null +++ b/testdata/tools/influxdb/.env.influxdb2-admin-password @@ -0,0 +1 @@ +password \ No newline at end of file diff --git a/testdata/tools/influxdb/.env.influxdb2-admin-token b/testdata/tools/influxdb/.env.influxdb2-admin-token new file mode 100644 index 00000000..029d54ef --- /dev/null +++ b/testdata/tools/influxdb/.env.influxdb2-admin-token @@ -0,0 +1 @@ +admintoken \ No newline at end of file diff --git a/testdata/tools/influxdb/.env.influxdb2-admin-username b/testdata/tools/influxdb/.env.influxdb2-admin-username new file mode 100644 index 00000000..f77b0040 --- /dev/null +++ b/testdata/tools/influxdb/.env.influxdb2-admin-username @@ -0,0 +1 @@ +admin \ No newline at end of file diff --git a/testdata/tools/influxdb/admin-token.json b/testdata/tools/influxdb/admin-token.json new file mode 100644 index 00000000..b6260b91 --- /dev/null +++ b/testdata/tools/influxdb/admin-token.json @@ -0,0 +1,4 @@ +{ + "token": "apiv3_OgXAgbMRgiGXcAQaFLJoaw==", + "name": "_admin" +} diff --git a/testdata/tools/influxdb/influxdb-2-seed.sh b/testdata/tools/influxdb/influxdb-2-seed.sh new file mode 100755 index 00000000..478f59b0 --- /dev/null +++ b/testdata/tools/influxdb/influxdb-2-seed.sh @@ -0,0 +1,83 @@ +#!/bin/bash +echo "Starting InfluxDB v2 data seeding..." + +ADMIN_TOKEN="admintoken" +ORG_NAME="system-logs" +BUCKET_NAME="b-system-logs" + +# --- Generate Timestamps --- +NOW=$(date +%s%N) +M1=$((NOW - 7200000000000)) # 2 hours ago +M2=$((NOW - 3600000000000)) # 1 hour ago +M3=$((NOW - 1800000000000)) # 30 min ago +M4=$((NOW - 900000000000)) # 15 min ago +M5=$((NOW - 300000000000)) # 5 min ago + +# --- Seed b-system-logs bucket --- +echo "Seeding $BUCKET_NAME bucket..." 
+influx write \ + --token "$ADMIN_TOKEN" \ + --org "$ORG_NAME" \ + --bucket "$BUCKET_NAME" \ + --precision ns \ + < Date: Sun, 1 Mar 2026 12:34:15 +0530 Subject: [PATCH 04/24] wip(influx-tools): add measurement,tag,field list tools --- tools/influx_db.go | 686 ++++++++++++++++++++++++++--- tools/influxdb_integration_test.go | 74 ++++ 2 files changed, 711 insertions(+), 49 deletions(-) create mode 100644 tools/influxdb_integration_test.go diff --git a/tools/influx_db.go b/tools/influx_db.go index 8770162a..0a58bf6a 100644 --- a/tools/influx_db.go +++ b/tools/influx_db.go @@ -1,21 +1,35 @@ package tools import ( + "bytes" "context" + "encoding/json" + "errors" "fmt" + "io" "net/http" + "regexp" "strconv" "strings" "time" mcpgrafana "github.com/grafana/mcp-grafana" "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" ) const ( InfluxDBDataSourceType = "influxdb" - InfluxResMaxDataPoints = 100 + InfluxDBMaxLimit uint = 1000 + InfluxDBDefaultLimit uint = 100 + + InfluxDBMeasurementsDefaultLimit uint = 100 + InfluxDBMeasurementsMaxLimit uint = 1000 + + //limit applied to fields , tags + InfluxDbTagsDefaultLimit uint = 100 + InfluxDbTagsMaxLimit uint = 1000 ) const ( @@ -30,30 +44,33 @@ type influxDBClient struct { } // newInfluxDBClient creates a new InfluxDB client for the given datasource -func newInfluxDBClient(ctx context.Context, uid string, queryType *string) (*influxDBClient, error) { +// queryType: when non-nil used to restict the datasource to have same queryType +// returns client along with querytype of datasource +func newInfluxDBClient(ctx context.Context, uid string, queryType *string) (*influxDBClient, string, error) { // Verify the datasource exists and is a InfluxDB datasource ds, err := getDatasourceByUID(ctx, GetDatasourceByUIDParams{UID: uid}) if err != nil { - return nil, err + return nil, "", err } if ds.Type != InfluxDBDataSourceType { - return nil, fmt.Errorf("datasource %s is of type %s, not %s", uid, ds.Type, 
InfluxDBDataSourceType) + return nil, "", fmt.Errorf("datasource %s is of type %s, not %s", uid, ds.Type, InfluxDBDataSourceType) } - if queryType != nil { - //verify the query lang specified is the one confgured with datasource - dsQueryType := InfluxQLQueryType + //verify the query lang specified is the one confgured with datasource + dsQueryType := InfluxQLQueryType - if jsonMap, ok := ds.JSONData.(map[string]interface{}); ok { - if dsQT, ok := jsonMap["version"].(string); ok && dsQT != "" { - dsQueryType = dsQT - } + if jsonMap, ok := ds.JSONData.(map[string]interface{}); ok { + if dsQT, ok := jsonMap["version"].(string); ok && dsQT != "" { + dsQueryType = dsQT } + } + if queryType != nil { if *queryType != dsQueryType { - return nil, fmt.Errorf("datasource %s is configured with querytype %s, not %s", uid, dsQueryType, *queryType) + return nil, dsQueryType, fmt.Errorf("datasource %s is configured with querytype %s, not %s", uid, dsQueryType, *queryType) } + } cfg := mcpgrafana.GrafanaConfigFromContext(ctx) @@ -65,7 +82,7 @@ func newInfluxDBClient(ctx context.Context, uid string, queryType *string) (*inf var err error transport, err = tlsConfig.HTTPTransport(transport.(*http.Transport)) if err != nil { - return nil, fmt.Errorf("failed to create custom transport: %w", err) + return nil, dsQueryType, fmt.Errorf("failed to create custom transport: %w", err) } } @@ -79,13 +96,13 @@ func newInfluxDBClient(ctx context.Context, uid string, queryType *string) (*inf return &influxDBClient{ httpClient: client, baseURL: baseURL, - }, nil + }, dsQueryType, nil } type InfluxQueryArgs struct { DatasourceUID string `json:"datasourceUid" jsonschema:"required,description=The UID of the InfluxDB datasource to query. Use list_datasources to find available UIDs."` Query string `json:"query" jsonschema:"required,description=SQL/Flux/InfluxQL query. 
Supports SQL macros: $__timeFilter for time filtering\\, $__timeFrom/$__timeTo for millisecond timestamps\\, $__interval for calculated intervals\\, $__dateBin()/$__dateBinAlias() to apply date_bin for timestamp columns. Supports Flux macros : v.timeRangeStart\\, v.timeRangeStop\\, v.windowPeriod (Grafana-calculated interval)\\, v.defaultBucket (configured default bucket)\\, v.organization (configured organization)\\."` - QueryType string `json:"query_type"` //TODO : enum options + QueryType string `json:"query_type" jsonschema:"required,enum=SQL,enum=Flux,enum=InfluxQL,description=QueryType of Datasource one of the specified options"` Start string `json:"start,omitempty" jsonschema:"description=Start time. Formats: 'now-1h'\\, '2026-02-02T19:00:00Z'\\, '1738519200000' (Unix ms). Default: now-1h"` End string `json:"end,omitempty" jsonschema:"description=End time. Formats: 'now'\\, '2026-02-02T20:00:00Z'\\, '1738522800000' (Unix ms). Default: now"` IntervalMs uint @@ -101,13 +118,16 @@ type influxQueryResponse struct { Name string `json:"name,omitempty"` RefID string `json:"refId,omitempty"` Fields []struct { + Labels struct { + Field string `json:"_field,omitempty"` + } `json:"labels"` Name string `json:"name"` Type string `json:"type"` TypeInfo struct { Frame string `json:"frame,omitempty"` } `json:"typeInfo,omitempty"` } `json:"fields"` - } `json:"schema"` + } `json:"schema,omitempty"` Data struct { Values [][]interface{} `json:"values"` } `json:"data"` @@ -116,7 +136,43 @@ type influxQueryResponse struct { } `json:"results"` } -func (ic *influxDBClient) Query(ctx context.Context, args InfluxQueryArgs, from, to time.Time) (influxQueryResponse, error) { +type InfluxQueryResFrame struct { + Name string + Columns []string + Rows []map[string]any + RowCount uint +} +type InfluxQueryResult struct { + Frames []*InfluxQueryResFrame + FramesCount int + Hints *EmptyResultHints `json:"hints,omitempty"` +} + +func queryTypePayloadKey(queryType string) (string, error) { + if 
queryType == SQLQueryType { + return "rawSql", nil + } + + if queryType == InfluxQLQueryType || queryType == FluxQueryType { + return "query", nil + } + + return "", fmt.Errorf("unknown query type: %s", queryType) +} + +func (ic *influxDBClient) Query(ctx context.Context, args InfluxQueryArgs, from, to time.Time) (*influxQueryResponse, error) { + queryPayloadKey, err := queryTypePayloadKey(args.QueryType) + + if err != nil { + //pass errors + return nil, err + } + format := "time_series" + + if args.QueryType == SQLQueryType { + format = "table" + } + payload := map[string]interface{}{ "queries": []map[string]interface{}{ { @@ -126,72 +182,604 @@ func (ic *influxDBClient) Query(ctx context.Context, args InfluxQueryArgs, from, }, "refId": "A", "type": "timeSeriesQuery", + "format": format, "intervalMs": args.IntervalMs, - "maxDataPoints": args.Limit, + queryPayloadKey: args.Query, + "rawQuery": true, + "limit": "", + "resultFormat": "time_series", }, }, "from": strconv.FormatInt(from.UnixMilli(), 10), "to": strconv.FormatInt(to.UnixMilli(), 10), } - // ic.httpClient.Post(ic.baseURL + "/api/ds/query" , map[string]any{ - // "queries" : map[string]any{ - // "refId" : "A", - // "datasource" : { - // "uid" : args.DatasourceUID, - // } - // } - // }) + fmt.Println(payload) + + payloadBytes, err := json.Marshal(payload) + if err != nil { + return nil, fmt.Errorf("marshaling query payload: %w", err) + } + + url := ic.baseURL + "/api/ds/query" + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(payloadBytes)) + if err != nil { + return nil, fmt.Errorf("creating request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + + resp, err := ic.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("executing request: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + bodyBytes, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("InfluxDB query returned status %d: %s", 
resp.StatusCode, string(bodyBytes)) + } + + // Read and parse response + body := io.LimitReader(resp.Body, 1024*1024*60) // 48MB limit + bodyBytes, err := io.ReadAll(body) + if err != nil { + return nil, fmt.Errorf("reading response body: %w", err) + } + fmt.Println(len(bodyBytes)) + var queryResp influxQueryResponse + if err := json.Unmarshal(bodyBytes, &queryResp); err != nil { + return nil, fmt.Errorf("unmarshaling response: %w", err) + } + + return &queryResp, nil } func enforceQueryLimit(args *InfluxQueryArgs) { - if args.Limit > InfluxResMaxDataPoints { - args.Limit = InfluxResMaxDataPoints + //flux , influxql limits per measurement(influxql) , table(flux) level so no of measurments * limit is final records + //sql limit applies on final records level + + limit := InfluxDBDefaultLimit + + if args.Limit >= InfluxDBMaxLimit { + limit = InfluxDBMaxLimit + } else if args.Limit > 0 { + limit = args.Limit + } + + if args.QueryType == SQLQueryType { + //wrap query and apply limit + query := strings.TrimSuffix(args.Query, ";") + args.Query = "(" + query + ")" + fmt.Sprintf(" LIMIT %d", limit) + } + if args.QueryType == InfluxQLQueryType { + //TODO : apply limit , idea : from end of string by overriding existing + } + if args.QueryType == FluxQueryType { + //TODO : apply limits for flux query type } } -func queryInflux(ctx context.Context, args InfluxQueryArgs) (*influxQueryResponse, error) { - client, err := newInfluxDBClient(ctx, args.DatasourceUID, &args.QueryType) +func parseTimeRange(start string, end string) (*time.Time, *time.Time, error) { + // Parse time range + defaultPeriod := time.Hour + + now := time.Now() + fromTime := now.Add(-1 * defaultPeriod) // Default: 1 hour ago + toTime := now // Default: now + + if start != "" { + parsed, err := parseStartTime(start) + if err != nil { + return nil, nil, fmt.Errorf("parsing start time: %w", err) + } + if !parsed.IsZero() { + fromTime = parsed + } + + //set relative end time 1hour from start + if end == "" { + 
toTime = fromTime.Add(defaultPeriod) + } + } + + if end != "" { + parsed, err := parseEndTime(end) + if err != nil { + return nil, nil, fmt.Errorf("parsing end time: %w", err) + } + if !parsed.IsZero() { + toTime = parsed + } + + if start == "" { + fromTime = toTime.Add(-1 * defaultPeriod) + } + } + + return &fromTime, &toTime, nil + +} + +func queryInflux(ctx context.Context, args InfluxQueryArgs) (*InfluxQueryResult, error) { + client, _, err := newInfluxDBClient(ctx, args.DatasourceUID, &args.QueryType) if err != nil { return nil, err } - //todo : enforce time range limits + originalQuery := args.Query enforceQueryLimit(&args) + from, to, err := parseTimeRange(args.Start, args.End) + if err != nil { + return nil, err + } - res, err := client.Query(ctx, args) + resp, err := client.Query(ctx, args, *from, *to) if err != nil { return nil, err } - return &res, nil + result := InfluxQueryResult{} + + hasResults := false + + for refID, r := range resp.Results { + if r.Error != "" { + return nil, fmt.Errorf("query error (refId=%s): %s", refID, r.Error) + } + + result.Frames = make([]*InfluxQueryResFrame, 0, len(r.Frames)) + + for _, frame := range r.Frames { + + noOfCol := len(frame.Schema.Fields) + if noOfCol == 0 { + //no columns for frame , skip frame + continue + } + + resFrame := InfluxQueryResFrame{} + resFrame.Columns = make([]string, 0, noOfCol) + + //no of rows count derived from count of values of first column + rowCount := (len(frame.Data.Values[0])) + resFrame.RowCount = uint(rowCount) + resFrame.Rows = make([]map[string]any, 0, rowCount) + resFrame.Name = frame.Schema.Name + + for colNo, field := range frame.Schema.Fields { + + fieldName := field.Name + + if field.Labels.Field != "" && field.Name == "_value" { + //use field name for column values of flux queries + fieldName = field.Labels.Field + } + + resFrame.Columns = append(resFrame.Columns, fieldName) + + for rowId, colValue := range frame.Data.Values[colNo] { + if len(resFrame.Rows) < (rowId + 1) { + 
resFrame.Rows = append(resFrame.Rows, make(map[string]any)) + } + + resFrame.Rows[rowId][fieldName] = colValue + } + } + + result.Frames = append(result.Frames, &resFrame) + if rowCount > 0 && !hasResults { + hasResults = true + } + } + } + + result.FramesCount = len(result.Frames) + + /* + InfluxQL Query has a frame for each column selection , ( different selection set result in varying row count for each frame) + SQL Query results in a single frame , selected columsn are mapped in frame.columns + */ + + if !hasResults { + result.Hints = GenerateEmptyResultHints(HintContext{ + DatasourceType: InfluxDBDataSourceType, + Query: originalQuery, + ProcessedQuery: args.Query, + StartTime: *from, + EndTime: *to, + }) + } + + return &result, nil } var QueryInflux = mcpgrafana.MustTool( - "list_loki_label_values", - "Retrieves all unique values associated with a specific `labelName` within a Loki datasource and time range. Returns a list of string values (e.g., for `labelName=\"env\"`, might return `[\"prod\", \"staging\", \"dev\"]`). Useful for discovering filter options. Defaults to the last hour if the time range is omitted.", + "query_influx", + "Queries influxdb of a datasource , supports one of flux , sql , influxql associated with datasource ", queryInflux, - mcp.WithTitleAnnotation("List Loki label values"), + mcp.WithTitleAnnotation("Query InfluxDB"), mcp.WithIdempotentHintAnnotation(true), mcp.WithReadOnlyHintAnnotation(true), ) -//Query method -//build query and execute -//struct for client -> +type ListBucketArgs struct { + DatasourceUID string `json:"datasourceUid" jsonschema:"required,description=The UID of the InfluxDB datasource. 
Use list_datasources to find available UIDs."` +} +type ListBucketResult struct { + Buckets *[]string `json:"buckets"` + BucketCount uint `json:"bucketCount"` + Hints *EmptyResultHints `json:"hints,omitempty"` +} + +func extractColValues(resp *influxQueryResponse, colName string) (*[]string, error) { + fieldValues := make([]string, 0) + + for _, result := range resp.Results { + + if result.Error != "" { + return nil, errors.New(result.Error) + } + + for _, frame := range result.Frames { + fieldColIdx := -1 + + for idx, field := range frame.Schema.Fields { + if field.Name == colName { + fieldColIdx = idx + break + } + } + + if fieldColIdx == -1 { + //no bucket name col found + continue + } + + if len(frame.Data.Values) <= fieldColIdx { + continue + } + + resizedFieldValues := make([]string, len(fieldValues), len(fieldValues)+len(frame.Data.Values[fieldColIdx])) + copy(resizedFieldValues, fieldValues) + fieldValues = resizedFieldValues + + for _, name := range frame.Data.Values[fieldColIdx] { + fieldValues = append(fieldValues, name.(string)) + } + } + } + + return &fieldValues, nil +} + +func listBuckets(ctx context.Context, args ListBucketArgs) (*ListBucketResult, error) { + queryType := FluxQueryType + client, _, err := newInfluxDBClient(ctx, args.DatasourceUID, &queryType) + + if err != nil { + pattern := `^datasource \S+ is configured with querytype \S+, not \S+$` + + matched, _ := regexp.MatchString(pattern, err.Error()) + + if matched { + return nil, fmt.Errorf("Datasource is not configured with FluxQL , bucket listing is explicit to FluxQL linked datasources") + } + return nil, err + } -/** - Client Query method + query := "buckets()" - Request Args (tool params) , Response , + refTime := time.Now() - tool specification (object) - -toolname - -toolhandler + response, err := client.Query(ctx, InfluxQueryArgs{DatasourceUID: args.DatasourceUID, Query: query, QueryType: FluxQueryType, Start: "", End: ""}, refTime, refTime) - add tools , - list tools method , - 
**/ -//create client and reuse + if err != nil { + return nil, err + } + + buckets, err := extractColValues(response, "name") + + if err != nil { + return nil, err + } + + result := ListBucketResult{} + + if len(*buckets) == 0 { + //return empty result hints + result.Hints = GenerateEmptyResultHints(HintContext{ + DatasourceType: FluxQueryType, + Query: query, + ProcessedQuery: query, + StartTime: refTime, + EndTime: refTime, + Error: fmt.Errorf("Empty results , check is buckets exist for connected datasources"), + }) + } + + result.BucketCount = uint(len(*buckets)) + result.Buckets = buckets + return &result, nil +} + +var ListBucketsInflux = mcpgrafana.MustTool( + "list_buckets_influxdb", + "Lists buckets of a InfluxDB Datasource identified with DataSourceId , requires the datasources to be linked with FluxQL , use in order list_datasources -> get_datasources -> list_buckets_influxdb", + listBuckets, + mcp.WithTitleAnnotation("List Buckets InfluxDB"), + mcp.WithIdempotentHintAnnotation(true), + mcp.WithReadOnlyHintAnnotation(true), +) + +type ListMeasurementsArgs struct { + DatasourceUID string `json:"datasourceUid" jsonschema:"required,description=The UID of the InfluxDB datasource. 
Use list_datasources to find available UIDs."` + Bucket string `json:"bucket,omitempty" jsonschema:"optional,description=Bucket Name of target bucket to fetch from,only required for FluxQL linked datasources."` + Limit uint `json:"limit"` +} + +type ListMeasurementResult struct { + Measurements *[]string `json:"measurements"` + MeasurementCount uint `json:"measurementCount"` + Hints *EmptyResultHints `json:"hints,omitempty"` +} + +func enforeMeasurementsLimit(args *ListMeasurementsArgs) { + if args.Limit > InfluxDBMeasurementsMaxLimit { + args.Limit = InfluxDBMeasurementsMaxLimit + } + if args.Limit == 0 { + args.Limit = InfluxDBMeasurementsDefaultLimit + } +} +func listMeasurements(ctx context.Context, args ListMeasurementsArgs) (*ListMeasurementResult, error) { + client, queryType, err := newInfluxDBClient(ctx, args.DatasourceUID, nil) + if err != nil { + return nil, err + } + + enforeMeasurementsLimit(&args) + + if queryType == FluxQueryType && args.Bucket == "" { + return nil, fmt.Errorf("Bucket is required for %s linked InfluxDb Datasources", FluxQueryType) + } + var query string + //represents column key of measurment in response + var colKey string + switch queryType { + case SQLQueryType: + query = fmt.Sprintf("SELECT table_name FROM information_schema.tables WHERE table_schema = 'iox' ORDER BY table_name LIMIT %d", args.Limit) + colKey = "table_name" + case FluxQueryType: + query = fmt.Sprintf(`import "influxdata/influxdb/schema" schema.measurements(bucket: "%s")|> limit(n: %d)`, args.Bucket, args.Limit) + colKey = "_value" + case InfluxQLQueryType: + query = fmt.Sprintf("SHOW MEASUREMENTS LIMIT %d", args.Limit) + colKey = "Value" + } + + refTime := time.Now() + response, err := client.Query(ctx, InfluxQueryArgs{DatasourceUID: args.DatasourceUID, Query: query, QueryType: queryType, Start: "", End: ""}, refTime, refTime) + + if err != nil { + return nil, err + } + + measurements, err := extractColValues(response, colKey) + + if err != nil { + return nil, 
err + } + + result := ListMeasurementResult{} + + if len(*measurements) == 0 { + //add empty results hints + result.Hints = GenerateEmptyResultHints(HintContext{ + DatasourceType: FluxQueryType, + Query: query, + ProcessedQuery: query, + StartTime: refTime, + EndTime: refTime, + Error: fmt.Errorf("No measurements found , verify at datasource"), + }) + } + + result.MeasurementCount = uint(len(*measurements)) + result.Measurements = measurements + return &result, nil +} + +var ListMeasurements = mcpgrafana.MustTool( + "list_measurements_influxdb", + "Lists Measurments of a InfluxDB Datasource identified with DataSourceId , use in order list_datasources -> get_datasources -> list_buckets_influxdb(only for fluxql linked datasource) -> list_measurements_influxdb", + listMeasurements, + mcp.WithTitleAnnotation("List Measurements InfluxDB"), + mcp.WithIdempotentHintAnnotation(true), + mcp.WithReadOnlyHintAnnotation(true), +) + +type ListTagKeysArgs struct { + DatasourceUID string `json:"datasourceUid" jsonschema:"required,description=The UID of the InfluxDB datasource. 
Use list_datasources to find available UIDs."` + Bucket string `json:"bucket,omitempty" jsonschema:"optional,description=Bucket Name of target bucket to fetch from,only required for FluxQL linked datasources."` + Measurement string `json:"measurement" jsonschema:"required,description=Filter by measurement"` + Limit uint `json:"limit"` +} +type ListTagKeysResult struct { + Tags *[]string `json:"tags"` + TagCount uint `json:"tagCount"` + Hints *EmptyResultHints `json:"hints,omitempty"` +} + +func enforeTagKeysLimit(args *ListTagKeysArgs) { + if args.Limit > InfluxDbTagsMaxLimit { + args.Limit = InfluxDbTagsMaxLimit + } + if args.Limit == 0 { + args.Limit = InfluxDbTagsDefaultLimit + } +} + +func listTagKeys(ctx context.Context, args ListTagKeysArgs) (*ListTagKeysResult, error) { + enforeTagKeysLimit(&args) + + client, queryType, err := newInfluxDBClient(ctx, args.DatasourceUID, nil) + + if err != nil { + return nil, err + } + + var tagColumnKey string + var query string + + switch queryType { + case SQLQueryType: + //data_type 'Dictionary%%' distiguishes tags from fields for SQL QURIES + query = fmt.Sprintf("SELECT column_name FROM information_schema.columns WHERE table_schema = 'iox' AND table_name = '%s' AND data_type LIKE 'Dictionary%%' ORDER BY column_name LIMIT %d", args.Measurement, args.Limit) + tagColumnKey = "column_name" + case FluxQueryType: + query = fmt.Sprintf(`import "influxdata/influxdb/schema" schema.measurementTagKeys(bucket: "%s", measurement: "%s")|> limit(n: %d)`, args.Bucket, args.Measurement, args.Limit) + tagColumnKey = "_value" + case InfluxQLQueryType: + query = fmt.Sprintf(`SHOW TAG KEYS FROM %s LIMIT %d`, args.Measurement, args.Limit) + tagColumnKey = "Value" + } + + refTime := time.Now() + response, err := client.Query(ctx, InfluxQueryArgs{DatasourceUID: args.DatasourceUID, Query: query, QueryType: queryType, Start: "", End: ""}, refTime, refTime) + + if err != nil { + return nil, err + } + + tags, err := extractColValues(response, 
tagColumnKey) + + if err != nil { + return nil, err + } + + result := ListTagKeysResult{} + + if len(*tags) == 0 { + //add empty results hints + result.Hints = GenerateEmptyResultHints(HintContext{ + DatasourceType: FluxQueryType, + Query: query, + ProcessedQuery: query, + StartTime: refTime, + EndTime: refTime, + Error: fmt.Errorf("No tags found , verify at datasource"), + }) + } + + result.TagCount = uint(len(*tags)) + result.Tags = tags + return &result, nil +} + +var ListTagKeys = mcpgrafana.MustTool( + "list_tag_keys_influxdb", + "Lists Tag Keys of a InfluxDB Datasource identified with DataSourceId , use in order list_datasources -> get_datasources -> list_buckets_influxdb -> list_measurements_influxdb -> list_tag_keys_influxdb", + listTagKeys, + mcp.WithTitleAnnotation("List Tag Keys InfluxDB"), + mcp.WithIdempotentHintAnnotation(true), + mcp.WithReadOnlyHintAnnotation(true), +) + +type ListFieldKeysArgs struct { + DatasourceUID string `json:"datasourceUid" jsonschema:"required,description=The UID of the InfluxDB datasource. 
Use list_datasources to find available UIDs."` + Bucket string `json:"bucket,omitempty" jsonschema:"optional,description=Bucket Name of target bucket to fetch from,only required for FluxQL linked datasources."` + Measurement string `json:"measurement" jsonschema:"required,description=Filter by measurement"` + Limit uint `json:"limit"` +} + +type ListFieldKeysResult struct { + Fields *[]string `json:"fields"` + FieldCount uint `json:"fieldCount"` + Hints *EmptyResultHints `json:"hints,omitempty"` +} + +// field keys, tag key use same variable for limits +func enforeFieldKeysLimit(args *ListFieldKeysArgs) { + if args.Limit > InfluxDbTagsMaxLimit { + args.Limit = InfluxDbTagsMaxLimit + } + if args.Limit == 0 { + args.Limit = InfluxDbTagsDefaultLimit + } +} + +func listFieldKeys(ctx context.Context, args ListFieldKeysArgs) (*ListFieldKeysResult, error) { + enforeFieldKeysLimit(&args) + + client, queryType, err := newInfluxDBClient(ctx, args.DatasourceUID, nil) + + if err != nil { + return nil, err + } + + var fieldColumnKey string + var query string + + switch queryType { + case SQLQueryType: + //data_type 'Dictionary%%' distiguishes tags from fields for SQL QURIES + query = fmt.Sprintf("SELECT column_name FROM information_schema.columns WHERE table_schema = 'iox' AND table_name = '%s' AND data_type NOT LIKE 'Dictionary%%' ORDER BY column_name LIMIT %d", args.Measurement, args.Limit) + fieldColumnKey = "column_name" + case FluxQueryType: + query = fmt.Sprintf(`import "influxdata/influxdb/schema" schema.measurementFieldKeys(bucket: "%s", measurement: "%s")|> limit(n: %d)`, args.Bucket, args.Measurement, args.Limit) + fieldColumnKey = "_value" + case InfluxQLQueryType: + query = fmt.Sprintf(`SHOW FIELD KEYS FROM %s LIMIT %d`, args.Measurement, args.Limit) + fieldColumnKey = "Value" + } + + refTime := time.Now() + response, err := client.Query(ctx, InfluxQueryArgs{DatasourceUID: args.DatasourceUID, Query: query, QueryType: queryType, Start: "", End: ""}, refTime, refTime) 
+ + if err != nil { + return nil, err + } + + tags, err := extractColValues(response, fieldColumnKey) + + if err != nil { + return nil, err + } + + result := ListFieldKeysResult{} + + if len(*tags) == 0 { + //add empty results hints + result.Hints = GenerateEmptyResultHints(HintContext{ + DatasourceType: FluxQueryType, + Query: query, + ProcessedQuery: query, + StartTime: refTime, + EndTime: refTime, + Error: fmt.Errorf("No tags found , verify at datasource"), + }) + } + + result.FieldCount = uint(len(*tags)) + result.Fields = tags + return &result, nil +} + +var ListFieldKeys = mcpgrafana.MustTool( + "list_field_keys_influxdb", + "Lists Field Keys of a InfluxDB Datasource identified with DataSourceId , use in order list_datasources -> get_datasources -> list_buckets_influxdb -> list_measurements_influxdb -> list_field_keys_influxdb", + listFieldKeys, + mcp.WithTitleAnnotation("List Field Keys InfluxDB"), + mcp.WithIdempotentHintAnnotation(true), + mcp.WithReadOnlyHintAnnotation(true), +) + +func AddInfluxTools(mcp *server.MCPServer) { + QueryInflux.Register(mcp) + ListBucketsInflux.Register(mcp) + ListMeasurements.Register(mcp) + ListTagKeys.Register(mcp) + ListFieldKeys.Register(mcp) +} diff --git a/tools/influxdb_integration_test.go b/tools/influxdb_integration_test.go new file mode 100644 index 00000000..1fcec152 --- /dev/null +++ b/tools/influxdb_integration_test.go @@ -0,0 +1,74 @@ +//go:build integration + +package tools + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_ListBuckets(t *testing.T) { + t.Run("list buckets for FluxQL linked DataSource", func(t *testing.T) { + ctx := newTestContext() + + result, err := listBuckets(ctx, ListBucketArgs{ + DatasourceUID: "influxdb-flux", + }) + require.NoError(t, err) + + assert.Contains(t, *result.Buckets, "b-system-logs", "should list buckets for FluxQL DataSource") + }) + + t.Run("error for SQL linked Datasource", func(t *testing.T) { 
+ ctx := newTestContext() + _, err := listBuckets(ctx, ListBucketArgs{ + DatasourceUID: "influxdb-sql", + }) + require.Error(t, err, "Datasource is not configured with FluxQL , bucket listing is explicit to FluxQL linked datasources") + }) + + t.Run("error for InfluxQL linked Datasource", func(t *testing.T) { + ctx := newTestContext() + _, err := listBuckets(ctx, ListBucketArgs{ + DatasourceUID: "influxdb-influxql", + }) + require.Error(t, err, "Datasource is not configured with FluxQL , bucket listing is explicit to FluxQL linked datasources") + }) +} + +func Test_ListMeasurements(t *testing.T) { + t.Run("require bucket for FluxQL Datasource", func(t *testing.T) { + ctx := newTestContext() + _, err := listMeasurements(ctx, ListMeasurementsArgs{ + DatasourceUID: "influxdb-flux", + }) + require.Error(t, err, fmt.Sprintf("Bucket is required for %s linked InfluxDb Datasources", FluxQueryType)) + }) + + t.Run("list measurements for Flux linked Datasource", func(t *testing.T) { + ctx := newTestContext() + result, err := listMeasurements(ctx, ListMeasurementsArgs{ + DatasourceUID: "influxdb-flux", + Bucket: "b-system-logs", + }) + require.NoError(t, err) + + t.Log(result.Measurements, result.Hints, result.MeasurementCount) + assert.Subset(t, *result.Measurements, []string{"test"}, "should list measurements for Flux linked Datasource") + }) + + t.Run("should list measurements for SQL linked Datasoure ", func(t *testing.T) { + ctx := newTestContext() + result, err := listMeasurements(ctx, ListMeasurementsArgs{ + DatasourceUID: "influxdb-sql", + Bucket: "b-system-logs", + }) + require.NoError(t, err) + assert.Subset(t, *result.Measurements, []string{}) + }) + + t.Run("should list buckets for InfluxQL linked Datasoure", func(t *testing.T) {}) +} From 70da93c524767dcc2b6fc8ac19b694a3771f321e Mon Sep 17 00:00:00 2001 From: Shaik-Sirajuddin Date: Mon, 2 Mar 2026 12:54:16 +0530 Subject: [PATCH 05/24] chore(docker): docker container for influxdb integration tests --- 
testdata/tools/influxdb/influxdb3-seed.sh | 27 -- ...{influxdb-2-seed.sh => influxdbv2-seed.sh} | 0 testdata/tools/influxdb/influxdbv3-seed.sh | 69 +++++ tools/influx_db.go | 24 +- tools/influxdb_integration_test.go | 275 +++++++++++++++++- 5 files changed, 344 insertions(+), 51 deletions(-) delete mode 100644 testdata/tools/influxdb/influxdb3-seed.sh rename testdata/tools/influxdb/{influxdb-2-seed.sh => influxdbv2-seed.sh} (100%) create mode 100644 testdata/tools/influxdb/influxdbv3-seed.sh diff --git a/testdata/tools/influxdb/influxdb3-seed.sh b/testdata/tools/influxdb/influxdb3-seed.sh deleted file mode 100644 index f3e8ea63..00000000 --- a/testdata/tools/influxdb/influxdb3-seed.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash -echo "Seed script start" -sleep 5 -AUTH_TOKEN="apiv3_OgXAgbMRgiGXcAQaFLJoaw==" - -DB_NAME="system-logs" - -echo "Creating database: $DB_NAME" - - -influxdb3 create database $DB_NAME --token $AUTH_TOKEN - -# --- Generate Seed Data (Line Protocol) --- -TIMESTAMP=$(date +%s%N) -ONE_HOUR_AGO=$(($(date +%s%N) - 3600000000000)) - -echo "Seeding data..." 
-# Measurement 1: auth_events (Security Logs) -# Measurement 2: resource_usage (Hardware Logs) - -cat < limit(n: %d)`, args.Bucket, args.Measurement, args.Limit) @@ -673,8 +674,8 @@ func listTagKeys(ctx context.Context, args ListTagKeysArgs) (*ListTagKeysResult, }) } - result.TagCount = uint(len(*tags)) - result.Tags = tags + result.TagKeysCount = uint(len(*tags)) + result.TagKeys = tags return &result, nil } @@ -695,9 +696,9 @@ type ListFieldKeysArgs struct { } type ListFieldKeysResult struct { - Fields *[]string `json:"fields"` - FieldCount uint `json:"fieldCount"` - Hints *EmptyResultHints `json:"hints,omitempty"` + FieldKeys *[]string `json:"fields"` + FieldKeysCount uint `json:"fieldCount"` + Hints *EmptyResultHints `json:"hints,omitempty"` } // field keys, tag key use same variable for limits @@ -762,8 +763,8 @@ func listFieldKeys(ctx context.Context, args ListFieldKeysArgs) (*ListFieldKeysR }) } - result.FieldCount = uint(len(*tags)) - result.Fields = tags + result.FieldKeysCount = uint(len(*tags)) + result.FieldKeys = tags return &result, nil } @@ -776,6 +777,7 @@ var ListFieldKeys = mcpgrafana.MustTool( mcp.WithReadOnlyHintAnnotation(true), ) + func AddInfluxTools(mcp *server.MCPServer) { QueryInflux.Register(mcp) ListBucketsInflux.Register(mcp) diff --git a/tools/influxdb_integration_test.go b/tools/influxdb_integration_test.go index 1fcec152..dab326e4 100644 --- a/tools/influxdb_integration_test.go +++ b/tools/influxdb_integration_test.go @@ -39,6 +39,54 @@ func Test_ListBuckets(t *testing.T) { }) } +func TestQuery(t *testing.T) { + + t.Run("SQL Query", func(t *testing.T) { + ctx := newTestContext() + query := `SELECT MAX("attempt_count") FROM "auth_events" WHERE "time" >= $__timeFrom AND "time" <= $__timeTo ` + + result, err := queryInflux(ctx, InfluxQueryArgs{ + DatasourceUID: "influxdb-sql", + Query: query, + QueryType: SQLQueryType, + Start: "now-24h", + End: "now", + }) + + //interval adjustment test + + require.NoError(t, err) + + 
assert.NotEmpty(t, result.Frames, "should contain a frame") + + t.Log(result.Frames[0], result.Hints) + + assert.Len(t, result.Frames, result.FramesCount, "should specify framecount equal to len(frames)") + + attemptCount, ok := result.Frames[0].Rows[0]["max(auth_events.attempt_count)"].(float64) + require.True(t, ok) + assert.Equal(t, attemptCount, 20.0) + }) + + t.Run("InfluxQL Query", func(t *testing.T) { + ctx := newTestContext() + + query := `SELECT mean("severity") FROM "auth_events" WHERE $timeFilter GROUP BY time($__interval) fill(null)` + + result, err := queryInflux(ctx, InfluxQueryArgs{ + DatasourceUID: "influxdb-influxql", + Query: query, + QueryType: InfluxQLQueryType, + Start: "now-1h", + }) + require.NoError(t, err) + t.Log(result.Frames[0], result.Hints) + + assert.NotEmpty(t, result.Frames) + assert.GreaterOrEqual(t, len(result.Frames[0].Rows), 20, "should contain query results") + }) +} + func Test_ListMeasurements(t *testing.T) { t.Run("require bucket for FluxQL Datasource", func(t *testing.T) { ctx := newTestContext() @@ -48,27 +96,228 @@ func Test_ListMeasurements(t *testing.T) { require.Error(t, err, fmt.Sprintf("Bucket is required for %s linked InfluxDb Datasources", FluxQueryType)) }) - t.Run("list measurements for Flux linked Datasource", func(t *testing.T) { + t.Run("bucket optional for SQL/InfluxQL Datasource", func(t *testing.T) { + dataSourceUIDs := []string{"influxdb-sql", "influxdb-influxql"} + for _, uid := range dataSourceUIDs { + ctx := newTestContext() + _, err := listMeasurements(ctx, ListMeasurementsArgs{ + DatasourceUID: uid, + }) + require.NoError(t, err) + } + }) + + t.Run("list measurements of a Datasource", func(t *testing.T) { + ctx := newTestContext() + + dataSourceUIDs := []string{"influxdb-flux", "influxdb-sql", "influxdb-influxql"} + + for _, uid := range dataSourceUIDs { + result, err := listMeasurements(ctx, ListMeasurementsArgs{ + DatasourceUID: uid, + Bucket: "b-system-logs", + }) + require.NoError(t, err) + + 
t.Log(result.Measurements, result.Hints, result.MeasurementCount) + assert.Subset(t, *result.Measurements, + []string{"auth_events", "db_queries", "http_requests", "queue_stats", "resource_usage", "syslog"}, + "should list measurements for %s linked Datasource", uid) + } + }) + +} +func Test_ListTagKeys(t *testing.T) { + + t.Run("require bucket for FluxQL Datasource", func(t *testing.T) { ctx := newTestContext() - result, err := listMeasurements(ctx, ListMeasurementsArgs{ + _, err := listTagKeys(ctx, ListTagKeysArgs{ DatasourceUID: "influxdb-flux", - Bucket: "b-system-logs", + Measurement: "auth_events", }) - require.NoError(t, err) + require.Error(t, err, fmt.Sprintf("Bucket is required for %s linked InfluxDb Datasources", FluxQueryType)) + }) + + t.Run("list tags keys", func(t *testing.T) { + dataSourceUIDs := []string{"influxdb-flux", "influxdb-sql", "influxdb-influxql"} + + for _, uid := range dataSourceUIDs { + ctx := newTestContext() + + bucket := "" + + if uid == "influxdb-flux" { + bucket = "b-system-logs" + } + + result, err := listTagKeys(ctx, ListTagKeysArgs{ + DatasourceUID: uid, + Bucket: bucket, + Measurement: "auth_events", + }) + require.NoError(t, err) - t.Log(result.Measurements, result.Hints, result.MeasurementCount) - assert.Subset(t, *result.Measurements, []string{"test"}, "should list measurements for Flux linked Datasource") + t.Log(result.TagKeys, uid, result.Hints) + + assert.Subset(t, *result.TagKeys, + []string{"ip", "status", "service"}, + "should list tag keys for %s linked Datasource", uid) + } }) - t.Run("should list measurements for SQL linked Datasoure ", func(t *testing.T) { + t.Run("hints for empty results", func(t *testing.T) { + dataSourceUIDs := []string{"influxdb-sql", "influxdb-influxql"} + + for _, uid := range dataSourceUIDs { + ctx := newTestContext() + + result, err := listTagKeys(ctx, ListTagKeysArgs{ + DatasourceUID: uid, + Measurement: "nonexistent", + }) + require.NoError(t, err) + + t.Log(result.TagKeys, uid, 
result.Hints) + + assert.NotNil(t, result.Hints, "should return hints") + + assert.Empty(t, *result.TagKeys, "should return empty list for non existent measurement") + } + }) + +} +func Test_ListFieldKeys(t *testing.T) { + + t.Run("require bucket for FluxQL Datasource", func(t *testing.T) { ctx := newTestContext() - result, err := listMeasurements(ctx, ListMeasurementsArgs{ - DatasourceUID: "influxdb-sql", - Bucket: "b-system-logs", + _, err := listFieldKeys(ctx, ListFieldKeysArgs{ + DatasourceUID: "influxdb-flux", + Measurement: "auth_events", }) - require.NoError(t, err) - assert.Subset(t, *result.Measurements, []string{}) + require.Error(t, err, fmt.Sprintf("Bucket is required for %s linked InfluxDb Datasources", FluxQueryType)) + }) + + t.Run("list field keys", func(t *testing.T) { + dataSourceUIDs := []string{"influxdb-flux", "influxdb-sql", "influxdb-influxql"} + + for _, uid := range dataSourceUIDs { + ctx := newTestContext() + + bucket := "" + + if uid == "influxdb-flux" { + bucket = "b-system-logs" + } + + result, err := listFieldKeys(ctx, ListFieldKeysArgs{ + DatasourceUID: uid, + Bucket: bucket, + Measurement: "auth_events", + }) + require.NoError(t, err) + + t.Log(result.FieldKeys, uid, result.Hints) + + assert.Subset(t, *result.FieldKeys, + []string{"attempt_count", "severity"}, + "should list field keys for %s linked Datasource", uid) + } + }) + + t.Run("hints for empty results", func(t *testing.T) { + dataSourceUIDs := []string{"influxdb-sql", "influxdb-influxql"} + + for _, uid := range dataSourceUIDs { + ctx := newTestContext() + + result, err := listFieldKeys(ctx, ListFieldKeysArgs{ + DatasourceUID: uid, + Measurement: "nonexistent", + }) + require.NoError(t, err) + + t.Log(result.FieldKeys, uid, result.Hints) + + assert.NotNil(t, result.Hints, "should return hints") + + assert.Empty(t, *result.FieldKeys, "should return empty list for non existent measurement") + } + }) + +} + +func Test_Limit(t *testing.T) { + dataSourceUIDs := 
[]string{"influxdb-flux", "influxdb-sql", "influxdb-influxql"} + + t.Run("list measurements with limits ", func(t *testing.T) { + + for _, uid := range dataSourceUIDs { + ctx := newTestContext() + + bucket := "" + if uid == "influxdb-flux" { + bucket = "b-system-logs" + } + + result, err := listMeasurements(ctx, ListMeasurementsArgs{ + DatasourceUID: uid, + Bucket: bucket, + Limit: 1, + }) + require.NoError(t, err) + + t.Log(result.Measurements, uid, result.Hints) + + assert.Len(t, *result.Measurements, 1) + } + }) + + t.Run("list tag keys with limit", func(t *testing.T) { + for _, uid := range dataSourceUIDs { + ctx := newTestContext() + + bucket := "" + + if uid == "influxdb-flux" { + bucket = "b-system-logs" + } + + result, err := listTagKeys(ctx, ListTagKeysArgs{ + DatasourceUID: uid, + Bucket: bucket, + Measurement: "auth_events", + Limit: 1, + }) + require.NoError(t, err) + + t.Log(result.TagKeys, uid, result.Hints) + + assert.Len(t, *result.TagKeys, 1) + } + }) + + t.Run("list field keys with limit", func(t *testing.T) { + for _, uid := range dataSourceUIDs { + ctx := newTestContext() + + bucket := "" + + if uid == "influxdb-flux" { + bucket = "b-system-logs" + } + + result, err := listFieldKeys(ctx, ListFieldKeysArgs{ + DatasourceUID: uid, + Bucket: bucket, + Measurement: "auth_events", + Limit: 1, + }) + require.NoError(t, err) + + t.Log(result.FieldKeys, uid, result.Hints) + + assert.Len(t, *result.FieldKeys, 1) + } }) - t.Run("should list buckets for InfluxQL linked Datasoure", func(t *testing.T) {}) } From da080125d13344a3e6d8bc7fdbefd47c0bc0d9ec Mon Sep 17 00:00:00 2001 From: Shaik-Sirajuddin Date: Mon, 2 Mar 2026 12:55:03 +0530 Subject: [PATCH 06/24] test(influxdb): add query , limit tests --- tools/influxdb_integration_test.go | 134 ++++++++++++++++++++++++----- 1 file changed, 114 insertions(+), 20 deletions(-) diff --git a/tools/influxdb_integration_test.go b/tools/influxdb_integration_test.go index dab326e4..d4c0e81d 100644 --- 
a/tools/influxdb_integration_test.go +++ b/tools/influxdb_integration_test.go @@ -27,7 +27,7 @@ func Test_ListBuckets(t *testing.T) { _, err := listBuckets(ctx, ListBucketArgs{ DatasourceUID: "influxdb-sql", }) - require.Error(t, err, "Datasource is not configured with FluxQL , bucket listing is explicit to FluxQL linked datasources") + require.Error(t, err, "Datasource is not configured with FluxQL, bucket listing is explicit to FluxQL linked datasources") }) t.Run("error for InfluxQL linked Datasource", func(t *testing.T) { @@ -35,15 +35,47 @@ func Test_ListBuckets(t *testing.T) { _, err := listBuckets(ctx, ListBucketArgs{ DatasourceUID: "influxdb-influxql", }) - require.Error(t, err, "Datasource is not configured with FluxQL , bucket listing is explicit to FluxQL linked datasources") + require.Error(t, err, "Datasource is not configured with FluxQL, bucket listing is explicit to FluxQL linked datasources") }) } +func Test_Query(t *testing.T) { -func TestQuery(t *testing.T) { + t.Run("Flux Query", func(t *testing.T) { + ctx := newTestContext() + + query := ` + from(bucket: "b-system-logs") + |> range(start: v.timeRangeStart, stop: v.timeRangeStop) + |> filter(fn: (r) => r["_measurement"] == "auth_events") + |> filter(fn: (r) => r["_field"] == "severity") + |> aggregateWindow(every: v.windowPeriod, column: "_value", fn: mean, createEmpty: false) + ` + + result, err := queryInflux(ctx, InfluxQueryArgs{ + DatasourceUID: "influxdb-flux", + Query: query, + QueryType: FluxQueryType, + Start: "now-24h", + End: "now", + }) + require.NoError(t, err) + assert.NotEmpty(t, result.Frames) + + t.Log(result.Frames[0], result.Hints) + + assert.Equal(t, 10, len(result.Frames), "should contain frames of all groups") + + for _, frame := range result.Frames { + assert.Equal(t, 1, len(frame.Rows), "should contain non-empty results for a frame") + _, ok := frame.Rows[0]["severity"] + // should contain field 'severity' + assert.True(t, ok, "should contain queried fields") + } + }) 
t.Run("SQL Query", func(t *testing.T) { ctx := newTestContext() - query := `SELECT MAX("attempt_count") FROM "auth_events" WHERE "time" >= $__timeFrom AND "time" <= $__timeTo ` + query := `SELECT MAX("attempt_count") AS count FROM "auth_events";` result, err := queryInflux(ctx, InfluxQueryArgs{ DatasourceUID: "influxdb-sql", @@ -53,47 +85,50 @@ func TestQuery(t *testing.T) { End: "now", }) - //interval adjustment test - require.NoError(t, err) assert.NotEmpty(t, result.Frames, "should contain a frame") + assert.Len(t, result.Frames, result.FramesCount, "should specify frame count equal to len(frames)") t.Log(result.Frames[0], result.Hints) + assert.NotEmpty(t, result.Frames[0].Rows, "should contain results") - assert.Len(t, result.Frames, result.FramesCount, "should specify framecount equal to len(frames)") + attemptCount, ok := result.Frames[0].Rows[0]["count"].(float64) - attemptCount, ok := result.Frames[0].Rows[0]["max(auth_events.attempt_count)"].(float64) - require.True(t, ok) - assert.Equal(t, attemptCount, 20.0) + require.True(t, ok, "should contain queried columns with expected type in a row") + assert.Equal(t, 20.0, attemptCount) }) t.Run("InfluxQL Query", func(t *testing.T) { ctx := newTestContext() - query := `SELECT mean("severity") FROM "auth_events" WHERE $timeFilter GROUP BY time($__interval) fill(null)` + query := `SELECT mean("severity") FROM "auth_events" GROUP BY time($__interval) fill(null)` result, err := queryInflux(ctx, InfluxQueryArgs{ DatasourceUID: "influxdb-influxql", Query: query, QueryType: InfluxQLQueryType, - Start: "now-1h", + Start: "now-24h", }) require.NoError(t, err) - t.Log(result.Frames[0], result.Hints) - assert.NotEmpty(t, result.Frames) + + t.Log(result.Frames[0], result.Hints) assert.GreaterOrEqual(t, len(result.Frames[0].Rows), 20, "should contain query results") + + t.Log(result.Frames[0].Rows[0], result.Frames[0].Columns) + _, ok := result.Frames[0].Rows[0][`auth_events.mean`].(float64) + + require.True(t, ok, "should 
contain queried columns with expected type in a row") }) } - func Test_ListMeasurements(t *testing.T) { t.Run("require bucket for FluxQL Datasource", func(t *testing.T) { ctx := newTestContext() _, err := listMeasurements(ctx, ListMeasurementsArgs{ DatasourceUID: "influxdb-flux", }) - require.Error(t, err, fmt.Sprintf("Bucket is required for %s linked InfluxDb Datasources", FluxQueryType)) + require.Error(t, err, fmt.Sprintf("Bucket is required for %s linked InfluxDB Datasources", FluxQueryType)) }) t.Run("bucket optional for SQL/InfluxQL Datasource", func(t *testing.T) { @@ -181,7 +216,7 @@ func Test_ListTagKeys(t *testing.T) { assert.NotNil(t, result.Hints, "should return hints") - assert.Empty(t, *result.TagKeys, "should return empty list for non existent measurement") + assert.Empty(t, *result.TagKeys, "should return empty list for non-existent measurement") } }) @@ -194,7 +229,7 @@ func Test_ListFieldKeys(t *testing.T) { DatasourceUID: "influxdb-flux", Measurement: "auth_events", }) - require.Error(t, err, fmt.Sprintf("Bucket is required for %s linked InfluxDb Datasources", FluxQueryType)) + require.Error(t, err, fmt.Sprintf("Bucket is required for %s linked InfluxDB Datasources", FluxQueryType)) }) t.Run("list field keys", func(t *testing.T) { @@ -240,12 +275,11 @@ func Test_ListFieldKeys(t *testing.T) { assert.NotNil(t, result.Hints, "should return hints") - assert.Empty(t, *result.FieldKeys, "should return empty list for non existent measurement") + assert.Empty(t, *result.FieldKeys, "should return empty list for non-existent measurement") } }) } - func Test_Limit(t *testing.T) { dataSourceUIDs := []string{"influxdb-flux", "influxdb-sql", "influxdb-influxql"} @@ -320,4 +354,64 @@ func Test_Limit(t *testing.T) { } }) + t.Run("execute query with limits", func(t *testing.T) { + + queries := []*InfluxQueryArgs{ + { + Query: ` + import "generate" + t1 = + generate.from( + count: 4, + fn: (n) => n + 1, + start: 2022-01-01T00:00:00Z, + stop: 2022-01-05T00:00:00Z, 
+ ) + |> set(key: "tag", value: "foo") + |> group(columns: ["tag"]) + + t2 = + generate.from( + count: 4, + fn: (n) => n * (-1), + start: 2022-01-01T00:00:00Z, + stop: 2022-01-05T00:00:00Z, + ) + |> set(key: "tag", value: "bar") + |> group(columns: ["tag"]) + + union(tables: [t1, t2]) + `, + DatasourceUID: "influxdb-flux", + QueryType: FluxQueryType, + }, + { + Query: `SELECT "attempt_count" FROM "auth_events" LIMIT 3;`, + DatasourceUID: "influxdb-sql", + QueryType: SQLQueryType, + }, + { + Query: `SELECT attempt_count FROM "auth_events" fill(null) LIMIT 10 OFFSET 2`, + DatasourceUID: "influxdb-influxql", + QueryType: InfluxQLQueryType, + }, + } + limit := 1 + for _, query := range queries { + ctx := newTestContext() + + query.Start = "now-4h" + query.End = "now" + query.Limit = uint(limit) + + result, err := queryInflux(ctx, *query) + + require.NoError(t, err) + t.Log(result.Frames, result.Hints) + assert.GreaterOrEqual(t, len(result.Frames), 1) + for _, frame := range result.Frames { + assert.Equal(t, limit, len(frame.Rows), "should limit number of rows of a frame") + } + } + }) } From 796c9044ac5de89e2c2420ea5eb65392e3a14467 Mon Sep 17 00:00:00 2001 From: Shaik-Sirajuddin Date: Mon, 2 Mar 2026 12:55:48 +0530 Subject: [PATCH 07/24] feat(influxdb): apply limits for queries --- tools/influx_db.go | 117 ++++++++++++++++++++++++++------------------- 1 file changed, 68 insertions(+), 49 deletions(-) diff --git a/tools/influx_db.go b/tools/influx_db.go index bbf05dd5..7bfaf507 100644 --- a/tools/influx_db.go +++ b/tools/influx_db.go @@ -27,7 +27,7 @@ const ( InfluxDBMeasurementsDefaultLimit uint = 100 InfluxDBMeasurementsMaxLimit uint = 1000 - //limit applied to fields , tags + //limit applied to fields, tags InfluxDbTagsDefaultLimit uint = 100 InfluxDbTagsMaxLimit uint = 1000 ) @@ -44,8 +44,8 @@ type influxDBClient struct { } // newInfluxDBClient creates a new InfluxDB client for the given datasource -// queryType: when non-nil used to restict the datasource to have 
same queryType -// returns client along with querytype of datasource +// queryType: when non-nil used to restrict the datasource to have same queryType +// returns client along with query type of datasource func newInfluxDBClient(ctx context.Context, uid string, queryType *string) (*influxDBClient, string, error) { // Verify the datasource exists and is a InfluxDB datasource ds, err := getDatasourceByUID(ctx, GetDatasourceByUIDParams{UID: uid}) @@ -57,7 +57,7 @@ func newInfluxDBClient(ctx context.Context, uid string, queryType *string) (*inf return nil, "", fmt.Errorf("datasource %s is of type %s, not %s", uid, ds.Type, InfluxDBDataSourceType) } - //verify the query lang specified is the one confgured with datasource + //verify the query lang specified is the one configured with datasource dsQueryType := InfluxQLQueryType if jsonMap, ok := ds.JSONData.(map[string]interface{}); ok { @@ -102,11 +102,11 @@ func newInfluxDBClient(ctx context.Context, uid string, queryType *string) (*inf type InfluxQueryArgs struct { DatasourceUID string `json:"datasourceUid" jsonschema:"required,description=The UID of the InfluxDB datasource to query. Use list_datasources to find available UIDs."` Query string `json:"query" jsonschema:"required,description=SQL/Flux/InfluxQL query. Supports SQL macros: $__timeFilter for time filtering\\, $__timeFrom/$__timeTo for millisecond timestamps\\, $__interval for calculated intervals\\, $__dateBin()/$__dateBinAlias() to apply date_bin for timestamp columns. Supports Flux macros : v.timeRangeStart\\, v.timeRangeStop\\, v.windowPeriod (Grafana-calculated interval)\\, v.defaultBucket (configured default bucket)\\, v.organization (configured organization)\\."` - QueryType string `json:"query_type" jsonschema:"required,enum=SQL,enum=Flux,enum=InfluxQL,description=QueryType of Datasource one of the specified options"` + QueryType string `json:"query_type" jsonschema:"required,enum=SQL,enum=Flux,enum=InfluxQL,description=QueryType of Datasource. 
One of the specified options"` Start string `json:"start,omitempty" jsonschema:"description=Start time. Formats: 'now-1h'\\, '2026-02-02T19:00:00Z'\\, '1738519200000' (Unix ms). Default: now-1h"` End string `json:"end,omitempty" jsonschema:"description=End time. Formats: 'now'\\, '2026-02-02T20:00:00Z'\\, '1738522800000' (Unix ms). Default: now"` - IntervalMs uint - Limit uint `json:"limit"` + IntervalMs uint `json:"interval_ms,omitempty" jsonschema:"description=Interval in milliseconds"` + Limit uint `json:"limit,omitempty" jsonschema:"description=Limit number of records per table (or group)"` } // influxQueryResponse represents the raw API response from Grafana's /api/ds/query @@ -126,6 +126,7 @@ type influxQueryResponse struct { TypeInfo struct { Frame string `json:"frame,omitempty"` } `json:"typeInfo,omitempty"` + Config map[string]interface{} `json:"config,omitempty"` } `json:"fields"` } `json:"schema,omitempty"` Data struct { @@ -164,7 +165,7 @@ func (ic *influxDBClient) Query(ctx context.Context, args InfluxQueryArgs, from, queryPayloadKey, err := queryTypePayloadKey(args.QueryType) if err != nil { - //pass errors + // Pass errors return nil, err } format := "time_series" @@ -235,7 +236,7 @@ func (ic *influxDBClient) Query(ctx context.Context, args InfluxQueryArgs, from, } func enforceQueryLimit(args *InfluxQueryArgs) { - //flux , influxql limits per measurement(influxql) , table(flux) level so no of measurments * limit is final records + //flux, influxql limits per measurement(influxql), table(flux) level so number of measurements * limit is final records //sql limit applies on final records level limit := InfluxDBDefaultLimit @@ -245,18 +246,32 @@ func enforceQueryLimit(args *InfluxQueryArgs) { } else if args.Limit > 0 { limit = args.Limit } + switch args.QueryType { - if args.QueryType == SQLQueryType { + case SQLQueryType: //wrap query and apply limit query := strings.TrimSuffix(args.Query, ";") args.Query = "(" + query + ")" + fmt.Sprintf(" LIMIT %d", 
limit) + case InfluxQLQueryType: + //InfluxQL query supports limits in the format + //LIMIT %d | LIMIT %d OFFSET %d, Unsupported: LIMIT %d,%d (from manually testing queries) + limitWithOffset := regexp.MustCompile(`(?i)(limit\s+)\d+(\s+offset\s+\d+)?(\s*$)`) + + if limitWithOffset.Match([]byte(args.Query)) { + + replacement := fmt.Sprintf("${1}%d${2}${3}", limit) + args.Query = limitWithOffset.ReplaceAllString(args.Query, replacement) + + } else { + args.Query = args.Query + fmt.Sprintf(" LIMIT %d", limit) + } + + case FluxQueryType: + //A query can execute selection of multiple tables + //flux |>limit() operator applies limit per table or group + args.Query = strings.TrimSpace(args.Query) + fmt.Sprintf("|>limit(n:%d)", limit) } - if args.QueryType == InfluxQLQueryType { - //TODO : apply limit , idea : from end of string by overriding existing - } - if args.QueryType == FluxQueryType { - //TODO : apply limits for flux query type - } + } func parseTimeRange(start string, end string) (*time.Time, *time.Time, error) { @@ -281,7 +296,7 @@ func parseTimeRange(start string, end string) (*time.Time, *time.Time, error) { toTime = fromTime.Add(defaultPeriod) } } - + //only pass start time , how does grafana default the end time if end != "" { parsed, err := parseEndTime(end) if err != nil { @@ -335,14 +350,14 @@ func queryInflux(ctx context.Context, args InfluxQueryArgs) (*InfluxQueryResult, noOfCol := len(frame.Schema.Fields) if noOfCol == 0 { - //no columns for frame , skip frame + //no columns for frame, skip frame continue } resFrame := InfluxQueryResFrame{} resFrame.Columns = make([]string, 0, noOfCol) - //no of rows count derived from count of values of first column + //Number of rows count derived from count of values of first column rowCount := (len(frame.Data.Values[0])) resFrame.RowCount = uint(rowCount) resFrame.Rows = make([]map[string]any, 0, rowCount) @@ -356,6 +371,12 @@ func queryInflux(ctx context.Context, args InfluxQueryArgs) (*InfluxQueryResult, //use 
field name for column values of flux queries fieldName = field.Labels.Field } + //influxql query with 'time_series' format query + if field.Config != nil { + if displayName, ok := field.Config["displayNameFromDS"].(string); ok && displayName != "" { + fieldName = displayName + } + } resFrame.Columns = append(resFrame.Columns, fieldName) @@ -378,8 +399,8 @@ func queryInflux(ctx context.Context, args InfluxQueryArgs) (*InfluxQueryResult, result.FramesCount = len(result.Frames) /* - InfluxQL Query has a frame for each column selection , ( different selection set result in varying row count for each frame) - SQL Query results in a single frame , selected columsn are mapped in frame.columns + InfluxQL Query has a frame for each column selection, (different selection sets result in varying row count for each frame) + SQL Query results in a single frame , selected columns are mapped in frame.columns */ if !hasResults { @@ -397,7 +418,7 @@ func queryInflux(ctx context.Context, args InfluxQueryArgs) (*InfluxQueryResult, var QueryInflux = mcpgrafana.MustTool( "query_influx", - "Queries influxdb of a datasource , supports one of flux , sql , influxql associated with datasource ", + "Queries InfluxDB datasource, supports one of Flux, SQL, or InfluxQL query languages configured with the datasource.", queryInflux, mcp.WithTitleAnnotation("Query InfluxDB"), mcp.WithIdempotentHintAnnotation(true), @@ -464,7 +485,7 @@ func listBuckets(ctx context.Context, args ListBucketArgs) (*ListBucketResult, e matched, _ := regexp.MatchString(pattern, err.Error()) if matched { - return nil, fmt.Errorf("Datasource is not configured with FluxQL , bucket listing is explicit to FluxQL linked datasources") + return nil, fmt.Errorf("Datasource is not configured with FluxQL, bucket listing is explicit to FluxQL linked datasources") } return nil, err } @@ -495,7 +516,7 @@ func listBuckets(ctx context.Context, args ListBucketArgs) (*ListBucketResult, e ProcessedQuery: query, StartTime: refTime, EndTime: 
refTime, - Error: fmt.Errorf("Empty results , check is buckets exist for connected datasources"), + Error: fmt.Errorf("Empty results, check if buckets exist for connected datasources"), }) } @@ -506,7 +527,7 @@ func listBuckets(ctx context.Context, args ListBucketArgs) (*ListBucketResult, e var ListBucketsInflux = mcpgrafana.MustTool( "list_buckets_influxdb", - "Lists buckets of a InfluxDB Datasource identified with DataSourceId , requires the datasources to be linked with FluxQL , use in order list_datasources -> get_datasources -> list_buckets_influxdb", + "Lists buckets of an InfluxDB datasource identified by its UID. Requires the datasource to be configured with FluxQL. Use in order: list_datasources -> get_datasource -> list_buckets_influxdb", listBuckets, mcp.WithTitleAnnotation("List Buckets InfluxDB"), mcp.WithIdempotentHintAnnotation(true), @@ -515,7 +536,7 @@ var ListBucketsInflux = mcpgrafana.MustTool( type ListMeasurementsArgs struct { DatasourceUID string `json:"datasourceUid" jsonschema:"required,description=The UID of the InfluxDB datasource. 
Use list_datasources to find available UIDs."` - Bucket string `json:"bucket,omitempty" jsonschema:"optional,description=Bucket Name of target bucket to fetch from,only required for FluxQL linked datasources."` + Bucket string `json:"bucket,omitempty" jsonschema:"optional,description=Bucket Name of target bucket to fetch from; required only for FluxQL linked datasources."` Limit uint `json:"limit"` } @@ -525,7 +546,7 @@ type ListMeasurementResult struct { Hints *EmptyResultHints `json:"hints,omitempty"` } -func enforeMeasurementsLimit(args *ListMeasurementsArgs) { +func enforceMeasurementsLimit(args *ListMeasurementsArgs) { if args.Limit > InfluxDBMeasurementsMaxLimit { args.Limit = InfluxDBMeasurementsMaxLimit } @@ -539,13 +560,13 @@ func listMeasurements(ctx context.Context, args ListMeasurementsArgs) (*ListMeas return nil, err } - enforeMeasurementsLimit(&args) + enforceMeasurementsLimit(&args) if queryType == FluxQueryType && args.Bucket == "" { - return nil, fmt.Errorf("Bucket is required for %s linked InfluxDb Datasources", FluxQueryType) + return nil, fmt.Errorf("Bucket is required for %s linked InfluxDB Datasources", FluxQueryType) } var query string - //represents column key of measurment in response + //represents column key of measurement in response var colKey string switch queryType { case SQLQueryType: @@ -582,7 +603,7 @@ func listMeasurements(ctx context.Context, args ListMeasurementsArgs) (*ListMeas ProcessedQuery: query, StartTime: refTime, EndTime: refTime, - Error: fmt.Errorf("No measurements found , verify at datasource"), + Error: fmt.Errorf("No measurements found, verify at datasource"), }) } @@ -593,7 +614,7 @@ func listMeasurements(ctx context.Context, args ListMeasurementsArgs) (*ListMeas var ListMeasurements = mcpgrafana.MustTool( "list_measurements_influxdb", - "Lists Measurments of a InfluxDB Datasource identified with DataSourceId , use in order list_datasources -> get_datasources -> list_buckets_influxdb(only for fluxql linked 
datasource) -> list_measurements_influxdb", + "Lists Measurements of an InfluxDB datasource identified by its UID. Use in order: list_datasources -> get_datasource -> list_buckets_influxdb (required only for FluxQL linked datasource) -> list_measurements_influxdb", listMeasurements, mcp.WithTitleAnnotation("List Measurements InfluxDB"), mcp.WithIdempotentHintAnnotation(true), @@ -602,7 +623,7 @@ var ListMeasurements = mcpgrafana.MustTool( type ListTagKeysArgs struct { DatasourceUID string `json:"datasourceUid" jsonschema:"required,description=The UID of the InfluxDB datasource. Use list_datasources to find available UIDs."` - Bucket string `json:"bucket,omitempty" jsonschema:"optional,description=Bucket Name of target bucket to fetch from,only required for FluxQL linked datasources."` + Bucket string `json:"bucket,omitempty" jsonschema:"optional,description=Bucket Name of target bucket to fetch from,required only for FluxQL linked datasources."` Measurement string `json:"measurement" jsonschema:"required,description=Filter by measurement"` Limit uint `json:"limit"` } @@ -612,7 +633,7 @@ type ListTagKeysResult struct { Hints *EmptyResultHints `json:"hints,omitempty"` } -func enforeTagKeysLimit(args *ListTagKeysArgs) { +func enforceTagKeysLimit(args *ListTagKeysArgs) { if args.Limit > InfluxDbTagsMaxLimit { args.Limit = InfluxDbTagsMaxLimit } @@ -622,7 +643,7 @@ func enforeTagKeysLimit(args *ListTagKeysArgs) { } func listTagKeys(ctx context.Context, args ListTagKeysArgs) (*ListTagKeysResult, error) { - enforeTagKeysLimit(&args) + enforceTagKeysLimit(&args) client, queryType, err := newInfluxDBClient(ctx, args.DatasourceUID, nil) @@ -635,15 +656,14 @@ func listTagKeys(ctx context.Context, args ListTagKeysArgs) (*ListTagKeysResult, switch queryType { case SQLQueryType: - //TODO : Escape '-' for measurement name - //data_type 'Dictionary%%' distiguishes tags from fields for SQL QURIES - query = fmt.Sprintf(`SELECT column_name FROM information_schema.columns WHERE 
table_schema = 'iox' AND table_name = '%s' AND data_type LIKE 'Dictionary%%' ORDER BY column_name LIMIT %d`, args.Bucket, args.Limit) + //data_type 'Dictionary%%' distinguishes tags from fields for SQL QUERIES + query = fmt.Sprintf("SELECT column_name FROM information_schema.columns WHERE table_schema = 'iox' AND table_name = '%s' AND data_type LIKE 'Dictionary%%' ORDER BY column_name LIMIT %d", args.Measurement, args.Limit) tagColumnKey = "column_name" case FluxQueryType: query = fmt.Sprintf(`import "influxdata/influxdb/schema" schema.measurementTagKeys(bucket: "%s", measurement: "%s")|> limit(n: %d)`, args.Bucket, args.Measurement, args.Limit) tagColumnKey = "_value" case InfluxQLQueryType: - query = fmt.Sprintf(`SHOW TAG KEYS FROM %s LIMIT %d`, args.Measurement, args.Limit) + query = fmt.Sprintf(`SHOW TAG KEYS FROM "%s" LIMIT %d`, args.Measurement, args.Limit) tagColumnKey = "Value" } @@ -670,7 +690,7 @@ func listTagKeys(ctx context.Context, args ListTagKeysArgs) (*ListTagKeysResult, ProcessedQuery: query, StartTime: refTime, EndTime: refTime, - Error: fmt.Errorf("No tags found , verify at datasource"), + Error: fmt.Errorf("No tags found, verify at datasource"), }) } @@ -681,7 +701,7 @@ func listTagKeys(ctx context.Context, args ListTagKeysArgs) (*ListTagKeysResult, var ListTagKeys = mcpgrafana.MustTool( "list_tag_keys_influxdb", - "Lists Tag Keys of a InfluxDB Datasource identified with DataSourceId , use in order list_datasources -> get_datasources -> list_buckets_influxdb -> list_measurements_influxdb -> list_tag_keys_influxdb", + "Lists Tag Keys of an InfluxDB datasource identified by its UID. 
Use in order: list_datasources -> get_datasource -> list_buckets_influxdb (required only for FluxQL linked datasource) -> list_measurements_influxdb -> list_tag_keys_influxdb", listTagKeys, mcp.WithTitleAnnotation("List Tag Keys InfluxDB"), mcp.WithIdempotentHintAnnotation(true), @@ -690,7 +710,7 @@ var ListTagKeys = mcpgrafana.MustTool( type ListFieldKeysArgs struct { DatasourceUID string `json:"datasourceUid" jsonschema:"required,description=The UID of the InfluxDB datasource. Use list_datasources to find available UIDs."` - Bucket string `json:"bucket,omitempty" jsonschema:"optional,description=Bucket Name of target bucket to fetch from,only required for FluxQL linked datasources."` + Bucket string `json:"bucket,omitempty" jsonschema:"optional,description=Bucket Name of target bucket to fetch from,required only for FluxQL linked datasources."` Measurement string `json:"measurement" jsonschema:"required,description=Filter by measurement"` Limit uint `json:"limit"` } @@ -702,7 +722,7 @@ type ListFieldKeysResult struct { } // field keys, tag key use same variable for limits -func enforeFieldKeysLimit(args *ListFieldKeysArgs) { +func enforceFieldKeysLimit(args *ListFieldKeysArgs) { if args.Limit > InfluxDbTagsMaxLimit { args.Limit = InfluxDbTagsMaxLimit } @@ -712,7 +732,7 @@ func enforeFieldKeysLimit(args *ListFieldKeysArgs) { } func listFieldKeys(ctx context.Context, args ListFieldKeysArgs) (*ListFieldKeysResult, error) { - enforeFieldKeysLimit(&args) + enforceFieldKeysLimit(&args) client, queryType, err := newInfluxDBClient(ctx, args.DatasourceUID, nil) @@ -725,14 +745,14 @@ func listFieldKeys(ctx context.Context, args ListFieldKeysArgs) (*ListFieldKeysR switch queryType { case SQLQueryType: - //data_type 'Dictionary%%' distiguishes tags from fields for SQL QURIES + //data_type 'Dictionary%%' distinguishes tags from fields for SQL QUERIES query = fmt.Sprintf("SELECT column_name FROM information_schema.columns WHERE table_schema = 'iox' AND table_name = '%s' AND 
data_type NOT LIKE 'Dictionary%%' ORDER BY column_name LIMIT %d", args.Measurement, args.Limit) fieldColumnKey = "column_name" case FluxQueryType: query = fmt.Sprintf(`import "influxdata/influxdb/schema" schema.measurementFieldKeys(bucket: "%s", measurement: "%s")|> limit(n: %d)`, args.Bucket, args.Measurement, args.Limit) fieldColumnKey = "_value" case InfluxQLQueryType: - query = fmt.Sprintf(`SHOW FIELD KEYS FROM %s LIMIT %d`, args.Measurement, args.Limit) + query = fmt.Sprintf(`SHOW FIELD KEYS FROM "%s" LIMIT %d`, args.Measurement, args.Limit) fieldColumnKey = "Value" } @@ -759,7 +779,7 @@ func listFieldKeys(ctx context.Context, args ListFieldKeysArgs) (*ListFieldKeysR ProcessedQuery: query, StartTime: refTime, EndTime: refTime, - Error: fmt.Errorf("No tags found , verify at datasource"), + Error: fmt.Errorf("No fields found, verify at datasource"), }) } @@ -770,14 +790,13 @@ func listFieldKeys(ctx context.Context, args ListFieldKeysArgs) (*ListFieldKeysR var ListFieldKeys = mcpgrafana.MustTool( "list_field_keys_influxdb", - "Lists Field Keys of a InfluxDB Datasource identified with DataSourceId , use in order list_datasources -> get_datasources -> list_buckets_influxdb -> list_measurements_influxdb -> list_field_keys_influxdb", + "Lists Field Keys of an InfluxDB datasource identified by its UID. 
Use in order: list_datasources -> get_datasource -> list_buckets_influxdb (required only for FluxQL linked datasource) -> list_measurements_influxdb -> list_field_keys_influxdb", listFieldKeys, mcp.WithTitleAnnotation("List Field Keys InfluxDB"), mcp.WithIdempotentHintAnnotation(true), mcp.WithReadOnlyHintAnnotation(true), ) - func AddInfluxTools(mcp *server.MCPServer) { QueryInflux.Register(mcp) ListBucketsInflux.Register(mcp) From 27f25c94eb44fb1e55e76b29bbe5e45af578c3da Mon Sep 17 00:00:00 2001 From: Shaik-Sirajuddin Date: Mon, 2 Mar 2026 12:57:34 +0530 Subject: [PATCH 08/24] chore(docker): entrypoints for influxdb init scripts --- cmd/mcp-grafana/main.go | 3 ++- docker-compose.yaml | 10 +++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/cmd/mcp-grafana/main.go b/cmd/mcp-grafana/main.go index 6a730c50..51992145 100644 --- a/cmd/mcp-grafana/main.go +++ b/cmd/mcp-grafana/main.go @@ -61,7 +61,7 @@ type grafanaConfig struct { } func (dt *disabledTools) addFlags() { - flag.StringVar(&dt.enabledTools, "enabled-tools", "search,datasource,incident,prometheus,loki,alerting,dashboard,folder,oncall,asserts,sift,pyroscope,navigation,proxied,annotations,rendering,influxdb", "A comma separated list of tools enabled for this server. Can be overwritten entirely or by disabling specific components, e.g. --disable-search.") + flag.StringVar(&dt.enabledTools, "enabled-tools", "search,datasource,incident,prometheus,loki,alerting,dashboard,folder,oncall,asserts,sift,pyroscope,navigation,proxied,annotations,rendering", "A comma separated list of tools enabled for this server. Can be overwritten entirely or by disabling specific components, e.g. 
--disable-search.") flag.BoolVar(&dt.search, "disable-search", false, "Disable search tools") flag.BoolVar(&dt.datasource, "disable-datasource", false, "Disable datasource tools") flag.BoolVar(&dt.incident, "disable-incident", false, "Disable incident tools") @@ -179,6 +179,7 @@ Available Capabilities: - Prometheus & Loki: Run PromQL and LogQL queries, retrieve metric/log metadata, and explore label names/values. - ClickHouse: Query ClickHouse datasources via Grafana with macro and variable substitution support. - Elasticsearch: Query Elasticsearch datasources using Lucene syntax or Query DSL for logs and metrics. +- InfluxDB : Query InfluxDB datasourcs with SQL , InfluxQL , Flux languages - Incidents: Search, create, update, and resolve incidents in Grafana Incident. - Sift Investigations: Start and manage Sift investigations, analyze logs/traces, find error patterns, and detect slow requests. - Alerting: List and fetch alert rules and notification contact points. diff --git a/docker-compose.yaml b/docker-compose.yaml index 8bab2adf..6f93b99c 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -136,7 +136,7 @@ services: build: dockerfile_inline: | FROM influxdb:3-core@sha256:255268d2a5f42b8c38d373864a4ba72956d91e14a3361019706bfad2f7c039ab - COPY --chmod=777 ./testdata/tools/influxdb/influxdb3-seed.sh /init.sh + COPY --chmod=777 ./testdata/tools/influxdb/influxdbv3-seed.sh /init.sh ports: - "8181:8181" command: > @@ -144,12 +144,12 @@ services: secrets: - admin-token influxdb2: - image: influxdb:2 - restart: unless-stopped + build: + dockerfile_inline: | + FROM influxdb:2 + COPY --chmod=777 ./testdata/tools/influxdb/influxdbv2-seed.sh /docker-entrypoint-initdb.d/init.sh ports: - "8086:8086" - volumes: - - ./tools/influxdb/influxdb-2-seed.sh:/docker-entrypoint-initdb.d/init.sh environment: DOCKER_INFLUXDB_INIT_MODE: setup DOCKER_INFLUXDB_INIT_USERNAME_FILE: /run/secrets/influxdb2-admin-username From e6fd613ff74c892647f240ba0f66ce6ffdc3666e Mon Sep 17 
00:00:00 2001 From: Shaik-Sirajuddin Date: Mon, 2 Mar 2026 13:28:50 +0530 Subject: [PATCH 09/24] chore(influxdb): update hint context keys --- tools/influx_db.go | 78 +++++++++++++++++------------- tools/influxdb_integration_test.go | 14 +++--- 2 files changed, 53 insertions(+), 39 deletions(-) diff --git a/tools/influx_db.go b/tools/influx_db.go index 7bfaf507..53f62944 100644 --- a/tools/influx_db.go +++ b/tools/influx_db.go @@ -28,8 +28,8 @@ const ( InfluxDBMeasurementsMaxLimit uint = 1000 //limit applied to fields, tags - InfluxDbTagsDefaultLimit uint = 100 - InfluxDbTagsMaxLimit uint = 1000 + InfluxDBTagsDefaultLimit uint = 100 + InfluxDBTagsMaxLimit uint = 1000 ) const ( @@ -138,10 +138,10 @@ type influxQueryResponse struct { } type InfluxQueryResFrame struct { - Name string - Columns []string - Rows []map[string]any - RowCount uint + Name string `json:"name"` + Columns []string `json:"columns"` + Rows []map[string]any `json:"rows"` + RowCount uint `json:"rowCount"` } type InfluxQueryResult struct { Frames []*InfluxQueryResFrame @@ -195,8 +195,6 @@ func (ic *influxDBClient) Query(ctx context.Context, args InfluxQueryArgs, from, "to": strconv.FormatInt(to.UnixMilli(), 10), } - fmt.Println(payload) - payloadBytes, err := json.Marshal(payload) if err != nil { return nil, fmt.Errorf("marshaling query payload: %w", err) @@ -221,12 +219,11 @@ func (ic *influxDBClient) Query(ctx context.Context, args InfluxQueryArgs, from, } // Read and parse response - body := io.LimitReader(resp.Body, 1024*1024*60) // 48MB limit + body := io.LimitReader(resp.Body, 1024*1024*48) // 48MB limit bodyBytes, err := io.ReadAll(body) if err != nil { return nil, fmt.Errorf("reading response body: %w", err) } - fmt.Println(len(bodyBytes)) var queryResp influxQueryResponse if err := json.Unmarshal(bodyBytes, &queryResp); err != nil { return nil, fmt.Errorf("unmarshaling response: %w", err) @@ -269,7 +266,7 @@ func enforceQueryLimit(args *InfluxQueryArgs) { case FluxQueryType: //A query can 
execute selection of multiple tables //flux |>limit() operator applies limit per table or group - args.Query = strings.TrimSpace(args.Query) + fmt.Sprintf("|>limit(n:%d)", limit) + args.Query = strings.TrimSpace(args.Query) + fmt.Sprintf("\n|>limit(n:%d)", limit) } } @@ -296,7 +293,7 @@ func parseTimeRange(start string, end string) (*time.Time, *time.Time, error) { toTime = fromTime.Add(defaultPeriod) } } - //only pass start time , how does grafana default the end time + if end != "" { parsed, err := parseEndTime(end) if err != nil { @@ -434,6 +431,13 @@ type ListBucketResult struct { Hints *EmptyResultHints `json:"hints,omitempty"` } +/* +* + + Exctracts Values from response of string type columns + +* +*/ func extractColValues(resp *influxQueryResponse, colName string) (*[]string, error) { fieldValues := make([]string, 0) @@ -467,7 +471,11 @@ func extractColValues(resp *influxQueryResponse, colName string) (*[]string, err fieldValues = resizedFieldValues for _, name := range frame.Data.Values[fieldColIdx] { - fieldValues = append(fieldValues, name.(string)) + if s, ok := name.(string); ok { + fieldValues = append(fieldValues, s) + } else { + return nil, errors.New("expected column to be string type") + } } } } @@ -477,14 +485,10 @@ func extractColValues(resp *influxQueryResponse, colName string) (*[]string, err func listBuckets(ctx context.Context, args ListBucketArgs) (*ListBucketResult, error) { queryType := FluxQueryType - client, _, err := newInfluxDBClient(ctx, args.DatasourceUID, &queryType) + client, sourceQueryType, err := newInfluxDBClient(ctx, args.DatasourceUID, &queryType) if err != nil { - pattern := `^datasource \S+ is configured with querytype \S+, not \S+$` - - matched, _ := regexp.MatchString(pattern, err.Error()) - - if matched { + if sourceQueryType != "" && sourceQueryType != queryType { return nil, fmt.Errorf("Datasource is not configured with FluxQL, bucket listing is explicit to FluxQL linked datasources") } return nil, err @@ -511,7 +515,7 
@@ func listBuckets(ctx context.Context, args ListBucketArgs) (*ListBucketResult, e if len(*buckets) == 0 { //return empty result hints result.Hints = GenerateEmptyResultHints(HintContext{ - DatasourceType: FluxQueryType, + DatasourceType: InfluxDBDataSourceType, Query: query, ProcessedQuery: query, StartTime: refTime, @@ -598,7 +602,7 @@ func listMeasurements(ctx context.Context, args ListMeasurementsArgs) (*ListMeas if len(*measurements) == 0 { //add empty results hints result.Hints = GenerateEmptyResultHints(HintContext{ - DatasourceType: FluxQueryType, + DatasourceType: InfluxDBDataSourceType, Query: query, ProcessedQuery: query, StartTime: refTime, @@ -634,11 +638,11 @@ type ListTagKeysResult struct { } func enforceTagKeysLimit(args *ListTagKeysArgs) { - if args.Limit > InfluxDbTagsMaxLimit { - args.Limit = InfluxDbTagsMaxLimit + if args.Limit > InfluxDBTagsMaxLimit { + args.Limit = InfluxDBTagsMaxLimit } if args.Limit == 0 { - args.Limit = InfluxDbTagsDefaultLimit + args.Limit = InfluxDBTagsDefaultLimit } } @@ -651,6 +655,10 @@ func listTagKeys(ctx context.Context, args ListTagKeysArgs) (*ListTagKeysResult, return nil, err } + if queryType == FluxQueryType && args.Bucket == "" { + return nil, fmt.Errorf("Bucket is required for %s linked InfluxDB Datasources", FluxQueryType) + } + var tagColumnKey string var query string @@ -685,7 +693,7 @@ func listTagKeys(ctx context.Context, args ListTagKeysArgs) (*ListTagKeysResult, if len(*tags) == 0 { //add empty results hints result.Hints = GenerateEmptyResultHints(HintContext{ - DatasourceType: FluxQueryType, + DatasourceType: InfluxDBDataSourceType, Query: query, ProcessedQuery: query, StartTime: refTime, @@ -723,11 +731,11 @@ type ListFieldKeysResult struct { // field keys, tag key use same variable for limits func enforceFieldKeysLimit(args *ListFieldKeysArgs) { - if args.Limit > InfluxDbTagsMaxLimit { - args.Limit = InfluxDbTagsMaxLimit + if args.Limit > InfluxDBTagsMaxLimit { + args.Limit = InfluxDBTagsMaxLimit } 
if args.Limit == 0 { - args.Limit = InfluxDbTagsDefaultLimit + args.Limit = InfluxDBTagsDefaultLimit } } @@ -740,6 +748,10 @@ func listFieldKeys(ctx context.Context, args ListFieldKeysArgs) (*ListFieldKeysR return nil, err } + if queryType == FluxQueryType && args.Bucket == "" { + return nil, fmt.Errorf("Bucket is required for %s linked InfluxDB Datasources", FluxQueryType) + } + var fieldColumnKey string var query string @@ -763,7 +775,7 @@ func listFieldKeys(ctx context.Context, args ListFieldKeysArgs) (*ListFieldKeysR return nil, err } - tags, err := extractColValues(response, fieldColumnKey) + fieldKeys, err := extractColValues(response, fieldColumnKey) if err != nil { return nil, err @@ -771,10 +783,10 @@ func listFieldKeys(ctx context.Context, args ListFieldKeysArgs) (*ListFieldKeysR result := ListFieldKeysResult{} - if len(*tags) == 0 { + if len(*fieldKeys) == 0 { //add empty results hints result.Hints = GenerateEmptyResultHints(HintContext{ - DatasourceType: FluxQueryType, + DatasourceType: InfluxDBDataSourceType, Query: query, ProcessedQuery: query, StartTime: refTime, @@ -783,8 +795,8 @@ func listFieldKeys(ctx context.Context, args ListFieldKeysArgs) (*ListFieldKeysR }) } - result.FieldKeysCount = uint(len(*tags)) - result.FieldKeys = tags + result.FieldKeysCount = uint(len(*fieldKeys)) + result.FieldKeys = fieldKeys return &result, nil } diff --git a/tools/influxdb_integration_test.go b/tools/influxdb_integration_test.go index d4c0e81d..8d74a55c 100644 --- a/tools/influxdb_integration_test.go +++ b/tools/influxdb_integration_test.go @@ -27,7 +27,7 @@ func Test_ListBuckets(t *testing.T) { _, err := listBuckets(ctx, ListBucketArgs{ DatasourceUID: "influxdb-sql", }) - require.Error(t, err, "Datasource is not configured with FluxQL, bucket listing is explicit to FluxQL linked datasources") + require.EqualError(t, err, "Datasource is not configured with FluxQL, bucket listing is explicit to FluxQL linked datasources") }) t.Run("error for InfluxQL linked 
Datasource", func(t *testing.T) { @@ -35,7 +35,7 @@ func Test_ListBuckets(t *testing.T) { _, err := listBuckets(ctx, ListBucketArgs{ DatasourceUID: "influxdb-influxql", }) - require.Error(t, err, "Datasource is not configured with FluxQL, bucket listing is explicit to FluxQL linked datasources") + require.EqualError(t, err, "Datasource is not configured with FluxQL, bucket listing is explicit to FluxQL linked datasources") }) } func Test_Query(t *testing.T) { @@ -128,7 +128,7 @@ func Test_ListMeasurements(t *testing.T) { _, err := listMeasurements(ctx, ListMeasurementsArgs{ DatasourceUID: "influxdb-flux", }) - require.Error(t, err, fmt.Sprintf("Bucket is required for %s linked InfluxDB Datasources", FluxQueryType)) + require.EqualError(t, err, fmt.Sprintf("Bucket is required for %s linked InfluxDB Datasources", FluxQueryType)) }) t.Run("bucket optional for SQL/InfluxQL Datasource", func(t *testing.T) { @@ -170,7 +170,7 @@ func Test_ListTagKeys(t *testing.T) { DatasourceUID: "influxdb-flux", Measurement: "auth_events", }) - require.Error(t, err, fmt.Sprintf("Bucket is required for %s linked InfluxDb Datasources", FluxQueryType)) + require.EqualError(t, err, fmt.Sprintf("Bucket is required for %s linked InfluxDB Datasources", FluxQueryType)) }) t.Run("list tags keys", func(t *testing.T) { @@ -229,7 +229,7 @@ func Test_ListFieldKeys(t *testing.T) { DatasourceUID: "influxdb-flux", Measurement: "auth_events", }) - require.Error(t, err, fmt.Sprintf("Bucket is required for %s linked InfluxDB Datasources", FluxQueryType)) + require.EqualError(t, err, fmt.Sprintf("Bucket is required for %s linked InfluxDB Datasources", FluxQueryType)) }) t.Run("list field keys", func(t *testing.T) { @@ -380,7 +380,7 @@ func Test_Limit(t *testing.T) { |> set(key: "tag", value: "bar") |> group(columns: ["tag"]) - union(tables: [t1, t2]) + union(tables: [t1, t2])//user comments should not bypass limits `, DatasourceUID: "influxdb-flux", QueryType: FluxQueryType, @@ -391,6 +391,8 @@ func 
Test_Limit(t *testing.T) { QueryType: SQLQueryType, }, { + // enforceQueryLimit rewrites the LIMIT clause to the enforced value while + // preserving the OFFSET, so `LIMIT 10 OFFSET 2` becomes `LIMIT 1 OFFSET 2`. Query: `SELECT attempt_count FROM "auth_events" fill(null) LIMIT 10 OFFSET 2`, DatasourceUID: "influxdb-influxql", QueryType: InfluxQLQueryType, From 1d2642bc0e91fbb0d223be7a22f53048c59f6df7 Mon Sep 17 00:00:00 2001 From: Shaik-Sirajuddin Date: Tue, 3 Mar 2026 21:07:50 +0530 Subject: [PATCH 10/24] chore(influx): add len checks --- testdata/localstack-init.sh | 1 + tools/influx_db.go | 24 ++++++++++++++++-------- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/testdata/localstack-init.sh b/testdata/localstack-init.sh index 663dd694..ad88f32f 100755 --- a/testdata/localstack-init.sh +++ b/testdata/localstack-init.sh @@ -1,3 +1,4 @@ +#!/bin/bash set -e echo "Seeding CloudWatch test data..." diff --git a/tools/influx_db.go b/tools/influx_db.go index 53f62944..e9b99e1d 100644 --- a/tools/influx_db.go +++ b/tools/influx_db.go @@ -9,6 +9,7 @@ import ( "io" "net/http" "regexp" + "slices" "strconv" "strings" "time" @@ -341,19 +342,25 @@ func queryInflux(ctx context.Context, args InfluxQueryArgs) (*InfluxQueryResult, return nil, fmt.Errorf("query error (refId=%s): %s", refID, r.Error) } - result.Frames = make([]*InfluxQueryResFrame, 0, len(r.Frames)) + clonedFrames := make([]*InfluxQueryResFrame, 0, len(result.Frames)+len(r.Frames)) + copy(clonedFrames, result.Frames) + result.Frames = clonedFrames for _, frame := range r.Frames { noOfCol := len(frame.Schema.Fields) if noOfCol == 0 { - //no columns for frame, skip frame + //columns not found for frame, skip frame continue } resFrame := InfluxQueryResFrame{} resFrame.Columns = make([]string, 0, noOfCol) + if len(frame.Data.Values) == 0 { + continue + } + //Number of rows count derived from count of values of first column rowCount := (len(frame.Data.Values[0])) resFrame.RowCount = uint(rowCount) @@ 
-392,6 +399,7 @@ func queryInflux(ctx context.Context, args InfluxQueryArgs) (*InfluxQueryResult, } } } + result.Frames = slices.Clip(result.Frames) result.FramesCount = len(result.Frames) @@ -809,10 +817,10 @@ var ListFieldKeys = mcpgrafana.MustTool( mcp.WithReadOnlyHintAnnotation(true), ) -func AddInfluxTools(mcp *server.MCPServer) { - QueryInflux.Register(mcp) - ListBucketsInflux.Register(mcp) - ListMeasurements.Register(mcp) - ListTagKeys.Register(mcp) - ListFieldKeys.Register(mcp) +func AddInfluxTools(server *server.MCPServer) { + QueryInflux.Register(server) + ListBucketsInflux.Register(server) + ListMeasurements.Register(server) + ListTagKeys.Register(server) + ListFieldKeys.Register(server) } From f07bd52668af42526a467fa20852a9115a97115b Mon Sep 17 00:00:00 2001 From: Shaik-Sirajuddin <89742297+Shaik-Sirajuddin@users.noreply.github.com> Date: Mon, 23 Mar 2026 11:28:29 +0530 Subject: [PATCH 11/24] Update tools/influx_db.go Co-authored-by: Ben Sully --- tools/influx_db.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/influx_db.go b/tools/influx_db.go index e9b99e1d..53c29481 100644 --- a/tools/influx_db.go +++ b/tools/influx_db.go @@ -538,7 +538,7 @@ func listBuckets(ctx context.Context, args ListBucketArgs) (*ListBucketResult, e } var ListBucketsInflux = mcpgrafana.MustTool( - "list_buckets_influxdb", + "list_influxdb_buckets", "Lists buckets of an InfluxDB datasource identified by its UID. Requires the datasource to be configured with FluxQL. 
Use in order: list_datasources -> get_datasource -> list_buckets_influxdb", listBuckets, mcp.WithTitleAnnotation("List Buckets InfluxDB"), From 55f18949a84e52ffa05658e07d03a096cab84dad Mon Sep 17 00:00:00 2001 From: Shaik-Sirajuddin <89742297+Shaik-Sirajuddin@users.noreply.github.com> Date: Mon, 23 Mar 2026 11:28:45 +0530 Subject: [PATCH 12/24] Update tools/influx_db.go Co-authored-by: Ben Sully --- tools/influx_db.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/influx_db.go b/tools/influx_db.go index 53c29481..334bc1e3 100644 --- a/tools/influx_db.go +++ b/tools/influx_db.go @@ -58,7 +58,7 @@ func newInfluxDBClient(ctx context.Context, uid string, queryType *string) (*inf return nil, "", fmt.Errorf("datasource %s is of type %s, not %s", uid, ds.Type, InfluxDBDataSourceType) } - //verify the query lang specified is the one configured with datasource + // Verify the query lang specified is the one configured with datasource dsQueryType := InfluxQLQueryType if jsonMap, ok := ds.JSONData.(map[string]interface{}); ok { From 2846bdf2e1606fba61d70ab5eab580b28d12ffe2 Mon Sep 17 00:00:00 2001 From: Shaik-Sirajuddin Date: Mon, 23 Mar 2026 11:35:08 +0530 Subject: [PATCH 13/24] chore(tools): type response definitions influxdb tools , grafana client package --- pkg/grafana/datasource.go | 46 +++++ tools/clickhouse.go | 29 +--- tools/cloudwatch.go | 31 +--- tools/cloudwatch_test.go | 99 ++--------- tools/{influx_db.go => influxdb.go} | 255 +++++++++++++++------------- tools/influxdb_integration_test.go | 6 + 6 files changed, 211 insertions(+), 255 deletions(-) create mode 100644 pkg/grafana/datasource.go rename tools/{influx_db.go => influxdb.go} (81%) diff --git a/pkg/grafana/datasource.go b/pkg/grafana/datasource.go new file mode 100644 index 00000000..919c2e93 --- /dev/null +++ b/pkg/grafana/datasource.go @@ -0,0 +1,46 @@ +// package grafana +package grafana + +import "errors" + +var ErrNoRows = errors.New("no rows in result set") + +type 
DSQueryPayload struct { + Queries []any `json:"queries"` + From string `json:"from"` + To string `json:"to"` +} + +type DsQueryFrameField struct { + Name string `json:"name"` + Type string `json:"type"` + TypeInfo struct { + Frame string `json:"frame,omitempty"` + } `json:"typeInfo,omitempty"` + Labels struct { + Field string `json:"_field,omitempty"` + } `json:"labels"` + Config map[string]interface{} `json:"config,omitempty"` +} + +type DsQueryFrame struct { + Schema struct { + Name string `json:"name,omitempty"` + RefID string `json:"refId,omitempty"` + Fields []DsQueryFrameField `json:"fields"` + } `json:"schema,omitempty"` + Data struct { + Values [][]interface{} `json:"values"` + } `json:"data"` +} + +type DsQueryResult struct { + Status int `json:"status,omitempty"` + Frames []DsQueryFrame `json:"frames,omitempty"` + Error string `json:"error,omitempty"` +} + +// DSQueryResponse represents the raw API response from Grafana's /api/ds/query +type DSQueryResponse struct { + Results map[string]DsQueryResult `json:"results"` +} diff --git a/tools/clickhouse.go b/tools/clickhouse.go index f7f6f9b3..2d59e89d 100644 --- a/tools/clickhouse.go +++ b/tools/clickhouse.go @@ -13,6 +13,7 @@ import ( "time" mcpgrafana "github.com/grafana/mcp-grafana" + "github.com/grafana/mcp-grafana/pkg/grafana" "github.com/mark3labs/mcp-go/mcp" "github.com/mark3labs/mcp-go/server" ) @@ -50,30 +51,6 @@ type ClickHouseQueryResult struct { Hints *EmptyResultHints `json:"hints,omitempty"` } -// clickHouseQueryResponse represents the raw API response from Grafana's /api/ds/query -type clickHouseQueryResponse struct { - Results map[string]struct { - Status int `json:"status,omitempty"` - Frames []struct { - Schema struct { - Name string `json:"name,omitempty"` - RefID string `json:"refId,omitempty"` - Fields []struct { - Name string `json:"name"` - Type string `json:"type"` - TypeInfo struct { - Frame string `json:"frame,omitempty"` - } `json:"typeInfo,omitempty"` - } `json:"fields"` - } 
`json:"schema"` - Data struct { - Values [][]interface{} `json:"values"` - } `json:"data"` - } `json:"frames,omitempty"` - Error string `json:"error,omitempty"` - } `json:"results"` -} - // clickHouseClient handles communication with Grafana's ClickHouse datasource type clickHouseClient struct { httpClient *http.Client @@ -119,7 +96,7 @@ func newClickHouseClient(ctx context.Context, uid string) (*clickHouseClient, er } // query executes a ClickHouse query via Grafana's /api/ds/query endpoint -func (c *clickHouseClient) query(ctx context.Context, datasourceUID, rawSQL string, from, to time.Time) (*clickHouseQueryResponse, error) { +func (c *clickHouseClient) query(ctx context.Context, datasourceUID, rawSQL string, from, to time.Time) (*grafana.DSQueryResponse, error) { // Build the query payload payload := map[string]interface{}{ "queries": []map[string]interface{}{ @@ -167,7 +144,7 @@ func (c *clickHouseClient) query(ctx context.Context, datasourceUID, rawSQL stri return nil, fmt.Errorf("reading response body: %w", err) } - var queryResp clickHouseQueryResponse + var queryResp grafana.DSQueryResponse if err := json.Unmarshal(bodyBytes, &queryResp); err != nil { return nil, fmt.Errorf("unmarshaling response: %w", err) } diff --git a/tools/cloudwatch.go b/tools/cloudwatch.go index 36560fc5..8009a5ad 100644 --- a/tools/cloudwatch.go +++ b/tools/cloudwatch.go @@ -13,6 +13,7 @@ import ( "time" mcpgrafana "github.com/grafana/mcp-grafana" + "github.com/grafana/mcp-grafana/pkg/grafana" "github.com/mark3labs/mcp-go/mcp" "github.com/mark3labs/mcp-go/server" ) @@ -48,32 +49,6 @@ type CloudWatchQueryResult struct { Hints []string `json:"hints,omitempty"` } -// cloudWatchQueryResponse represents the raw API response from Grafana's /api/ds/query -type cloudWatchQueryResponse struct { - Results map[string]struct { - Status int `json:"status,omitempty"` - Frames []struct { - Schema struct { - Name string `json:"name,omitempty"` - RefID string `json:"refId,omitempty"` - Fields 
[]struct { - Name string `json:"name"` - Type string `json:"type"` - Labels map[string]string `json:"labels,omitempty"` - Config map[string]interface{} `json:"config,omitempty"` - TypeInfo struct { - Frame string `json:"frame,omitempty"` - } `json:"typeInfo,omitempty"` - } `json:"fields"` - } `json:"schema"` - Data struct { - Values [][]interface{} `json:"values"` - } `json:"data"` - } `json:"frames,omitempty"` - Error string `json:"error,omitempty"` - } `json:"results"` -} - // cloudWatchClient handles communication with Grafana's CloudWatch datasource type cloudWatchClient struct { httpClient *http.Client @@ -119,7 +94,7 @@ func newCloudWatchClient(ctx context.Context, uid string) (*cloudWatchClient, er } // query executes a CloudWatch query via Grafana's /api/ds/query endpoint -func (c *cloudWatchClient) query(ctx context.Context, args CloudWatchQueryParams, from, to time.Time) (*cloudWatchQueryResponse, error) { +func (c *cloudWatchClient) query(ctx context.Context, args CloudWatchQueryParams, from, to time.Time) (*grafana.DSQueryResponse, error) { // Format dimensions for CloudWatch query // CloudWatch expects dimensions as map[string][]string dimensions := make(map[string][]string) @@ -200,7 +175,7 @@ func (c *cloudWatchClient) query(ctx context.Context, args CloudWatchQueryParams return nil, fmt.Errorf("reading response body: %w", err) } - var queryResp cloudWatchQueryResponse + var queryResp grafana.DSQueryResponse if err := json.Unmarshal(bodyBytes, &queryResp); err != nil { return nil, fmt.Errorf("unmarshaling response: %w", err) } diff --git a/tools/cloudwatch_test.go b/tools/cloudwatch_test.go index d4e0c7e0..921f3bc4 100644 --- a/tools/cloudwatch_test.go +++ b/tools/cloudwatch_test.go @@ -7,6 +7,7 @@ import ( "net/url" "testing" + "github.com/grafana/mcp-grafana/pkg/grafana" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -251,108 +252,34 @@ func TestParseCloudWatchMetricsResponse(t *testing.T) { func 
TestCloudWatchMultiFrameStatistics(t *testing.T) { // Build a cloudWatchQueryResponse with 2 frames to verify statistics // are accumulated across all frames, not just the last one. - resp := &cloudWatchQueryResponse{ - Results: map[string]struct { - Status int `json:"status,omitempty"` - Frames []struct { - Schema struct { - Name string `json:"name,omitempty"` - RefID string `json:"refId,omitempty"` - Fields []struct { - Name string `json:"name"` - Type string `json:"type"` - Labels map[string]string `json:"labels,omitempty"` - Config map[string]interface{} `json:"config,omitempty"` - TypeInfo struct { - Frame string `json:"frame,omitempty"` - } `json:"typeInfo,omitempty"` - } `json:"fields"` - } `json:"schema"` - Data struct { - Values [][]interface{} `json:"values"` - } `json:"data"` - } `json:"frames,omitempty"` - Error string `json:"error,omitempty"` - }{}, - } - - // Frame type for convenience - type frame = struct { - Schema struct { - Name string `json:"name,omitempty"` - RefID string `json:"refId,omitempty"` - Fields []struct { - Name string `json:"name"` - Type string `json:"type"` - Labels map[string]string `json:"labels,omitempty"` - Config map[string]interface{} `json:"config,omitempty"` - TypeInfo struct { - Frame string `json:"frame,omitempty"` - } `json:"typeInfo,omitempty"` - } `json:"fields"` - } `json:"schema"` - Data struct { - Values [][]interface{} `json:"values"` - } `json:"data"` - } - - type field = struct { - Name string `json:"name"` - Type string `json:"type"` - Labels map[string]string `json:"labels,omitempty"` - Config map[string]interface{} `json:"config,omitempty"` - TypeInfo struct { - Frame string `json:"frame,omitempty"` - } `json:"typeInfo,omitempty"` + resp := &grafana.DSQueryResponse{ + Results: map[string]grafana.DsQueryResult{}, } // Frame 1: values 10, 20 (sum=30, min=10, max=20) - f1 := frame{} - f1.Schema.Fields = []field{ + f1 := grafana.DsQueryFrame{} + f1.Schema.Fields = []grafana.DsQueryFrameField{ {Name: "Time", Type: 
"time"}, {Name: "Value", Type: "number"}, } f1.Data.Values = [][]interface{}{ - {float64(1000), float64(2000)}, // timestamps - {float64(10.0), float64(20.0)}, // values + {float64(1000), float64(2000)}, // timestamps + {float64(10.0), float64(20.0)}, // values } // Frame 2: values 5, 40 (sum=45, min=5, max=40) - f2 := frame{} - f2.Schema.Fields = []field{ + f2 := grafana.DsQueryFrame{} + f2.Schema.Fields = []grafana.DsQueryFrameField{ {Name: "Time", Type: "time"}, {Name: "Value", Type: "number"}, } f2.Data.Values = [][]interface{}{ - {float64(3000), float64(4000)}, // timestamps - {float64(5.0), float64(40.0)}, // values - } - - type resultType = struct { - Status int `json:"status,omitempty"` - Frames []struct { - Schema struct { - Name string `json:"name,omitempty"` - RefID string `json:"refId,omitempty"` - Fields []struct { - Name string `json:"name"` - Type string `json:"type"` - Labels map[string]string `json:"labels,omitempty"` - Config map[string]interface{} `json:"config,omitempty"` - TypeInfo struct { - Frame string `json:"frame,omitempty"` - } `json:"typeInfo,omitempty"` - } `json:"fields"` - } `json:"schema"` - Data struct { - Values [][]interface{} `json:"values"` - } `json:"data"` - } `json:"frames,omitempty"` - Error string `json:"error,omitempty"` + {float64(3000), float64(4000)}, // timestamps + {float64(5.0), float64(40.0)}, // values } - resp.Results["A"] = resultType{ - Frames: []frame{f1, f2}, + resp.Results["A"] = grafana.DsQueryResult{ + Frames: []grafana.DsQueryFrame{f1, f2}, } // Process the response the same way queryCloudWatch does diff --git a/tools/influx_db.go b/tools/influxdb.go similarity index 81% rename from tools/influx_db.go rename to tools/influxdb.go index e9b99e1d..cce25ffd 100644 --- a/tools/influx_db.go +++ b/tools/influxdb.go @@ -15,10 +15,12 @@ import ( "time" mcpgrafana "github.com/grafana/mcp-grafana" + "github.com/grafana/mcp-grafana/pkg/grafana" "github.com/mark3labs/mcp-go/mcp" "github.com/mark3labs/mcp-go/server" ) 
+// InfluxDB constants define limits and data source types for the InfluxDB client. const ( InfluxDBDataSourceType = "influxdb" @@ -26,13 +28,15 @@ const ( InfluxDBDefaultLimit uint = 100 InfluxDBMeasurementsDefaultLimit uint = 100 + // InfluxDBMeasurementsMaxLimit is the maximum limit applied when listing measurements. InfluxDBMeasurementsMaxLimit uint = 1000 - //limit applied to fields, tags + // InfluxDBTagsDefaultLimit is the default limit applied to fields and tags. InfluxDBTagsDefaultLimit uint = 100 InfluxDBTagsMaxLimit uint = 1000 ) +// Supported query types for the InfluxDB client. const ( FluxQueryType = "Flux" SQLQueryType = "SQL" @@ -110,90 +114,73 @@ type InfluxQueryArgs struct { Limit uint `json:"limit,omitempty" jsonschema:"description=Limit number of records per table (or group)"` } -// influxQueryResponse represents the raw API response from Grafana's /api/ds/query -type influxQueryResponse struct { - Results map[string]struct { - Status int `json:"status,omitempty"` - Frames []struct { - Schema struct { - Name string `json:"name,omitempty"` - RefID string `json:"refId,omitempty"` - Fields []struct { - Labels struct { - Field string `json:"_field,omitempty"` - } `json:"labels"` - Name string `json:"name"` - Type string `json:"type"` - TypeInfo struct { - Frame string `json:"frame,omitempty"` - } `json:"typeInfo,omitempty"` - Config map[string]interface{} `json:"config,omitempty"` - } `json:"fields"` - } `json:"schema,omitempty"` - Data struct { - Values [][]interface{} `json:"values"` - } `json:"data"` - } `json:"frames,omitempty"` - Error string `json:"error,omitempty"` - } `json:"results"` -} - +// InfluxQueryResFrame represents a single frame of data in the query response. type InfluxQueryResFrame struct { Name string `json:"name"` Columns []string `json:"columns"` Rows []map[string]any `json:"rows"` RowCount uint `json:"rowCount"` } +// InfluxQueryResult contains the parsed results of an InfluxDB query. 
type InfluxQueryResult struct { Frames []*InfluxQueryResFrame FramesCount int Hints *EmptyResultHints `json:"hints,omitempty"` } -func queryTypePayloadKey(queryType string) (string, error) { - if queryType == SQLQueryType { - return "rawSql", nil - } - - if queryType == InfluxQLQueryType || queryType == FluxQueryType { - return "query", nil - } - - return "", fmt.Errorf("unknown query type: %s", queryType) +type InfluxQLQuery struct { + Datasource DatasourceRef `json:"datasource"` + RefID string `json:"refId"` + Type string `json:"type"` + Format string `json:"format"` + IntervalMs uint `json:"intervalMs"` + Query string `json:"query"` + RawSQL string `json:"rawSql"` + RawQuery bool `json:"rawQuery"` + Limit string `json:"limit"` + ResultFormat string `json:"resultFormat"` } -func (ic *influxDBClient) Query(ctx context.Context, args InfluxQueryArgs, from, to time.Time) (*influxQueryResponse, error) { - queryPayloadKey, err := queryTypePayloadKey(args.QueryType) +// DatasourceRef encapsulates the unique identifier and type of a Grafana data source. 
+type DatasourceRef struct { + UID string `json:"uid"` + Type string `json:"type"` +} - if err != nil { - // Pass errors - return nil, err - } +func (ic *influxDBClient) Query(ctx context.Context, args InfluxQueryArgs, from, to time.Time) (*grafana.DSQueryResponse, error) { format := "time_series" if args.QueryType == SQLQueryType { format = "table" } - payload := map[string]interface{}{ - "queries": []map[string]interface{}{ - { - "datasource": map[string]string{ - "uid": args.DatasourceUID, - "type": InfluxDBDataSourceType, - }, - "refId": "A", - "type": "timeSeriesQuery", - "format": format, - "intervalMs": args.IntervalMs, - queryPayloadKey: args.Query, - "rawQuery": true, - "limit": "", - "resultFormat": "time_series", - }, + query := InfluxQLQuery{ + Datasource: DatasourceRef{ + UID: args.DatasourceUID, + Type: InfluxDBDataSourceType, }, - "from": strconv.FormatInt(from.UnixMilli(), 10), - "to": strconv.FormatInt(to.UnixMilli(), 10), + RefID: "A", + Type: "timeSeriesQuery", + Format: format, + IntervalMs: args.IntervalMs, + RawQuery: true, + Limit: "", + ResultFormat: "time_series", + } + + // append query + if args.QueryType == SQLQueryType { + query.RawSQL = args.Query + } else { + query.Query = args.Query + } + + payload := grafana.DSQueryPayload{ + Queries: []any{ + query, + }, + From: strconv.FormatInt(from.UnixMilli(), 10), + To: strconv.FormatInt(to.UnixMilli(), 10), } payloadBytes, err := json.Marshal(payload) @@ -220,12 +207,12 @@ func (ic *influxDBClient) Query(ctx context.Context, args InfluxQueryArgs, from, } // Read and parse response - body := io.LimitReader(resp.Body, 1024*1024*48) // 48MB limit + body := io.LimitReader(resp.Body, 1024*1024*10) // 10MB limit bodyBytes, err := io.ReadAll(body) if err != nil { return nil, fmt.Errorf("reading response body: %w", err) } - var queryResp influxQueryResponse + var queryResp grafana.DSQueryResponse if err := json.Unmarshal(bodyBytes, &queryResp); err != nil { return nil, fmt.Errorf("unmarshaling 
response: %w", err) } @@ -313,38 +300,24 @@ func parseTimeRange(start string, end string) (*time.Time, *time.Time, error) { } -func queryInflux(ctx context.Context, args InfluxQueryArgs) (*InfluxQueryResult, error) { - client, _, err := newInfluxDBClient(ctx, args.DatasourceUID, &args.QueryType) - - if err != nil { - return nil, err - } - - originalQuery := args.Query - - enforceQueryLimit(&args) - from, to, err := parseTimeRange(args.Start, args.End) - if err != nil { - return nil, err - } - - resp, err := client.Query(ctx, args, *from, *to) - if err != nil { - return nil, err - } - - result := InfluxQueryResult{} +// parseQueryResponseFrames parses ds/query response in a json key-pair format +// returns list of frames combined of query results +// treats empty results as an error +func parseQueryResponseFrames(resp *grafana.DSQueryResponse) ([]*InfluxQueryResFrame, error) { + frames := make([]*InfluxQueryResFrame, 0) hasResults := false + // InfluxQL Query has a frame for each column selection, (different selection sets result in varying row count for each frame) + // SQL Query results in a single frame , selected columns are mapped in frame.columns for refID, r := range resp.Results { if r.Error != "" { return nil, fmt.Errorf("query error (refId=%s): %s", refID, r.Error) } - clonedFrames := make([]*InfluxQueryResFrame, 0, len(result.Frames)+len(r.Frames)) - copy(clonedFrames, result.Frames) - result.Frames = clonedFrames + clonedFrames := make([]*InfluxQueryResFrame, 0, len(frames)+len(r.Frames)) + copy(clonedFrames, frames) + frames = clonedFrames for _, frame := range r.Frames { @@ -393,22 +366,51 @@ func queryInflux(ctx context.Context, args InfluxQueryArgs) (*InfluxQueryResult, } } - result.Frames = append(result.Frames, &resFrame) + frames = append(frames, &resFrame) if rowCount > 0 && !hasResults { hasResults = true } } } - result.Frames = slices.Clip(result.Frames) - result.FramesCount = len(result.Frames) + var err error + if !hasResults { + err = 
grafana.ErrNoRows + } + frames = slices.Clip(frames) - /* - InfluxQL Query has a frame for each column selection, (different selection sets result in varying row count for each frame) - SQL Query results in a single frame , selected columns are mapped in frame.columns - */ + return frames, err +} +func queryInflux(ctx context.Context, args InfluxQueryArgs) (*InfluxQueryResult, error) { + client, _, err := newInfluxDBClient(ctx, args.DatasourceUID, &args.QueryType) - if !hasResults { + if err != nil { + return nil, err + } + + originalQuery := args.Query + + enforceQueryLimit(&args) + from, to, err := parseTimeRange(args.Start, args.End) + if err != nil { + return nil, err + } + + resp, err := client.Query(ctx, args, *from, *to) + if err != nil { + return nil, err + } + + result := InfluxQueryResult{} + + frames, err := parseQueryResponseFrames(resp) + + if err != nil { + if err != grafana.ErrNoRows { + return nil, err + } + // query response returned no rows + // respond sucess with hints result.Hints = GenerateEmptyResultHints(HintContext{ DatasourceType: InfluxDBDataSourceType, Query: originalQuery, @@ -418,12 +420,15 @@ func queryInflux(ctx context.Context, args InfluxQueryArgs) (*InfluxQueryResult, }) } + result.Frames = frames + result.FramesCount = len(result.Frames) + return &result, nil } var QueryInflux = mcpgrafana.MustTool( "query_influx", - "Queries InfluxDB datasource, supports one of Flux, SQL, or InfluxQL query languages configured with the datasource.", + "Queries InfluxDB datasource, supports one of Flux, SQL, or InfluxQL query languages. 
Use in order: list_datasources -> get_datasource to determine query language configured for datasource.Use both list_field_keys_influxdb , list_tag_keys_influxdb to determine the available columns", queryInflux, mcp.WithTitleAnnotation("Query InfluxDB"), mcp.WithIdempotentHintAnnotation(true), @@ -439,14 +444,8 @@ type ListBucketResult struct { Hints *EmptyResultHints `json:"hints,omitempty"` } -/* -* - - Exctracts Values from response of string type columns - -* -*/ -func extractColValues(resp *influxQueryResponse, colName string) (*[]string, error) { +// extractColValues extracts Values from response of string type columns +func extractColValues(resp *grafana.DSQueryResponse, colName string) (*[]string, error) { fieldValues := make([]string, 0) for _, result := range resp.Results { @@ -585,7 +584,8 @@ func listMeasurements(ctx context.Context, args ListMeasurementsArgs) (*ListMeas query = fmt.Sprintf("SELECT table_name FROM information_schema.tables WHERE table_schema = 'iox' ORDER BY table_name LIMIT %d", args.Limit) colKey = "table_name" case FluxQueryType: - query = fmt.Sprintf(`import "influxdata/influxdb/schema" schema.measurements(bucket: "%s")|> limit(n: %d)`, args.Bucket, args.Limit) + query = fmt.Sprintf(`import "influxdata/influxdb/schema" schema.measurements(bucket: %s)|> limit(n: %d)`, + quoteStringAsFluxLiteral(args.Bucket), args.Limit) colKey = "_value" case InfluxQLQueryType: query = fmt.Sprintf("SHOW MEASUREMENTS LIMIT %d", args.Limit) @@ -654,6 +654,25 @@ func enforceTagKeysLimit(args *ListTagKeysArgs) { } } +func quoteStringAsLiteral(s string) string { + // SQL style: single quotes, escape internal single quotes by doubling + return "'" + strings.ReplaceAll(s, "'", "''") + "'" +} + +func quoteStringAsFluxLiteral(s string) string { + // Flux style: double quotes, escape backslash then double quotes + s = strings.ReplaceAll(s, `\`, `\\`) + s = strings.ReplaceAll(s, `"`, `\"`) + return `"` + s + `"` +} + +func quoteStringAsInfluxQLLiteral(s string) 
string { + // InfluxQL style: double quotes, escape backslash then double quotes + s = strings.ReplaceAll(s, `\`, `\\`) + s = strings.ReplaceAll(s, `"`, `\"`) + return `"` + s + `"` +} + func listTagKeys(ctx context.Context, args ListTagKeysArgs) (*ListTagKeysResult, error) { enforceTagKeysLimit(&args) @@ -672,14 +691,17 @@ func listTagKeys(ctx context.Context, args ListTagKeysArgs) (*ListTagKeysResult, switch queryType { case SQLQueryType: - //data_type 'Dictionary%%' distinguishes tags from fields for SQL QUERIES - query = fmt.Sprintf("SELECT column_name FROM information_schema.columns WHERE table_schema = 'iox' AND table_name = '%s' AND data_type LIKE 'Dictionary%%' ORDER BY column_name LIMIT %d", args.Measurement, args.Limit) + // data_type 'Dictionary%%' distinguishes tags from fields for SQL QUERIES + query = fmt.Sprintf("SELECT column_name FROM information_schema.columns WHERE table_schema = 'iox' AND table_name = %s AND data_type LIKE 'Dictionary%%' ORDER BY column_name LIMIT %d", + quoteStringAsLiteral(args.Measurement), args.Limit) tagColumnKey = "column_name" case FluxQueryType: - query = fmt.Sprintf(`import "influxdata/influxdb/schema" schema.measurementTagKeys(bucket: "%s", measurement: "%s")|> limit(n: %d)`, args.Bucket, args.Measurement, args.Limit) + query = fmt.Sprintf(`import "influxdata/influxdb/schema" schema.measurementTagKeys(bucket: %s, measurement: %s)|> limit(n: %d)`, + quoteStringAsFluxLiteral(args.Bucket), quoteStringAsFluxLiteral(args.Measurement), args.Limit) tagColumnKey = "_value" case InfluxQLQueryType: - query = fmt.Sprintf(`SHOW TAG KEYS FROM "%s" LIMIT %d`, args.Measurement, args.Limit) + query = fmt.Sprintf(`SHOW TAG KEYS FROM %s LIMIT %d`, + quoteStringAsInfluxQLLiteral(args.Measurement), args.Limit) tagColumnKey = "Value" } @@ -737,7 +759,7 @@ type ListFieldKeysResult struct { Hints *EmptyResultHints `json:"hints,omitempty"` } -// field keys, tag key use same variable for limits +// enforceFieldKeysLimit applies the default or 
maximum limits to the provided field keys arguments. func enforceFieldKeysLimit(args *ListFieldKeysArgs) { if args.Limit > InfluxDBTagsMaxLimit { args.Limit = InfluxDBTagsMaxLimit @@ -766,13 +788,16 @@ func listFieldKeys(ctx context.Context, args ListFieldKeysArgs) (*ListFieldKeysR switch queryType { case SQLQueryType: //data_type 'Dictionary%%' distinguishes tags from fields for SQL QUERIES - query = fmt.Sprintf("SELECT column_name FROM information_schema.columns WHERE table_schema = 'iox' AND table_name = '%s' AND data_type NOT LIKE 'Dictionary%%' ORDER BY column_name LIMIT %d", args.Measurement, args.Limit) + query = fmt.Sprintf("SELECT column_name FROM information_schema.columns WHERE table_schema = 'iox' AND table_name = %s AND data_type NOT LIKE 'Dictionary%%' ORDER BY column_name LIMIT %d", + quoteStringAsLiteral(args.Measurement), args.Limit) fieldColumnKey = "column_name" case FluxQueryType: - query = fmt.Sprintf(`import "influxdata/influxdb/schema" schema.measurementFieldKeys(bucket: "%s", measurement: "%s")|> limit(n: %d)`, args.Bucket, args.Measurement, args.Limit) + query = fmt.Sprintf(`import "influxdata/influxdb/schema" schema.measurementFieldKeys(bucket: %s, measurement: %s)|> limit(n: %d)`, + quoteStringAsFluxLiteral(args.Bucket), quoteStringAsFluxLiteral(args.Measurement), args.Limit) fieldColumnKey = "_value" case InfluxQLQueryType: - query = fmt.Sprintf(`SHOW FIELD KEYS FROM "%s" LIMIT %d`, args.Measurement, args.Limit) + query = fmt.Sprintf(`SHOW FIELD KEYS FROM %s LIMIT %d`, + quoteStringAsInfluxQLLiteral(args.Measurement), args.Limit) fieldColumnKey = "Value" } diff --git a/tools/influxdb_integration_test.go b/tools/influxdb_integration_test.go index 8d74a55c..519733b1 100644 --- a/tools/influxdb_integration_test.go +++ b/tools/influxdb_integration_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/require" ) +// Test_ListBuckets verifies the listing of buckets for different InfluxDB datasource linked types. 
func Test_ListBuckets(t *testing.T) { t.Run("list buckets for FluxQL linked DataSource", func(t *testing.T) { ctx := newTestContext() @@ -38,6 +39,7 @@ func Test_ListBuckets(t *testing.T) { require.EqualError(t, err, "Datasource is not configured with FluxQL, bucket listing is explicit to FluxQL linked datasources") }) } +// Test_Query verifies querying InfluxDB with Flux, SQL and InfluxQL query languages. func Test_Query(t *testing.T) { t.Run("Flux Query", func(t *testing.T) { @@ -122,6 +124,7 @@ func Test_Query(t *testing.T) { require.True(t, ok, "should contain queried columns with expected type in a row") }) } +// Test_ListMeasurements verifies the listing of measurements for different InfluxDB datasource linked types. func Test_ListMeasurements(t *testing.T) { t.Run("require bucket for FluxQL Datasource", func(t *testing.T) { ctx := newTestContext() @@ -162,6 +165,7 @@ func Test_ListMeasurements(t *testing.T) { }) } +// Test_ListTagKeys verifies the listing of tag keys for different InfluxDB datasource linked types. func Test_ListTagKeys(t *testing.T) { t.Run("require bucket for FluxQL Datasource", func(t *testing.T) { @@ -221,6 +225,7 @@ func Test_ListTagKeys(t *testing.T) { }) } +// Test_ListFieldKeys verifies the listing of field keys for different InfluxDB datasource linked types. func Test_ListFieldKeys(t *testing.T) { t.Run("require bucket for FluxQL Datasource", func(t *testing.T) { @@ -280,6 +285,7 @@ func Test_ListFieldKeys(t *testing.T) { }) } +// Test_Limit verifies the correct application of rate limits on queries across datasource linked types. 
func Test_Limit(t *testing.T) { dataSourceUIDs := []string{"influxdb-flux", "influxdb-sql", "influxdb-influxql"} From 7337fb12b7fa1163aaad802225947e34f0408f54 Mon Sep 17 00:00:00 2001 From: Shaik-Sirajuddin Date: Mon, 23 Mar 2026 11:35:46 +0530 Subject: [PATCH 14/24] chore(tools): unit tests for influxdb tools --- tools/influxdb_unit_test.go | 298 ++++++++++++++++++++++++++++++++++++ 1 file changed, 298 insertions(+) create mode 100644 tools/influxdb_unit_test.go diff --git a/tools/influxdb_unit_test.go b/tools/influxdb_unit_test.go new file mode 100644 index 00000000..17c5b948 --- /dev/null +++ b/tools/influxdb_unit_test.go @@ -0,0 +1,298 @@ +package tools + +import ( + "errors" + "testing" + "time" + + "github.com/grafana/mcp-grafana/pkg/grafana" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_enforceFieldKeysLimit(t *testing.T) { + t.Run("test_feature", func(t *testing.T) { + t.Run("should apply maximum limit when exceeded", func(t *testing.T) { + args := ListFieldKeysArgs{Limit: InfluxDBTagsMaxLimit + 10} + enforceFieldKeysLimit(&args) + assert.Equal(t, InfluxDBTagsMaxLimit, args.Limit, "limit should be maximum limit") + t.Log("applied max limit") + }) + + t.Run("should apply default limit when limit is 0", func(t *testing.T) { + args := ListFieldKeysArgs{Limit: 0} + enforceFieldKeysLimit(&args) + assert.Equal(t, InfluxDBTagsDefaultLimit, args.Limit, "limit should be default limit") + t.Log("applied default limit") + }) + + t.Run("should keep custom limit when within bounds", func(t *testing.T) { + args := ListFieldKeysArgs{Limit: 50} + enforceFieldKeysLimit(&args) + assert.Equal(t, uint(50), args.Limit, "limit should remain custom") + t.Log("kept custom limit") + }) + }) +} + +func Test_enforceTagKeysLimit(t *testing.T) { + t.Run("test_feature", func(t *testing.T) { + t.Run("should apply maximum limit when exceeded", func(t *testing.T) { + args := ListTagKeysArgs{Limit: InfluxDBTagsMaxLimit + 10} + 
enforceTagKeysLimit(&args) + assert.Equal(t, InfluxDBTagsMaxLimit, args.Limit, "limit should be maximum limit") + t.Log("applied max limit") + }) + + t.Run("should apply default limit when limit is 0", func(t *testing.T) { + args := ListTagKeysArgs{Limit: 0} + enforceTagKeysLimit(&args) + assert.Equal(t, InfluxDBTagsDefaultLimit, args.Limit, "limit should be default limit") + t.Log("applied default limit") + }) + + t.Run("should keep custom limit when within bounds", func(t *testing.T) { + args := ListTagKeysArgs{Limit: 80} + enforceTagKeysLimit(&args) + assert.Equal(t, uint(80), args.Limit, "limit should remain custom") + t.Log("kept custom limit") + }) + }) +} + +func Test_enforceMeasurementsLimit(t *testing.T) { + t.Run("test_feature", func(t *testing.T) { + t.Run("should apply maximum limit when exceeded", func(t *testing.T) { + args := ListMeasurementsArgs{Limit: InfluxDBMeasurementsMaxLimit + 100} + enforceMeasurementsLimit(&args) + assert.Equal(t, InfluxDBMeasurementsMaxLimit, args.Limit, "limit should be maximum limit") + t.Log("applied max limit") + }) + + t.Run("should apply default limit when limit is 0", func(t *testing.T) { + args := ListMeasurementsArgs{Limit: 0} + enforceMeasurementsLimit(&args) + assert.Equal(t, InfluxDBMeasurementsDefaultLimit, args.Limit, "limit should be default limit") + t.Log("applied default limit") + }) + + t.Run("should keep custom limit when within bounds", func(t *testing.T) { + args := ListMeasurementsArgs{Limit: 120} + enforceMeasurementsLimit(&args) + assert.Equal(t, uint(120), args.Limit, "limit should remain custom") + t.Log("kept custom limit") + }) + }) +} + +func Test_enforceQueryLimit(t *testing.T) { + t.Run("test_feature", func(t *testing.T) { + t.Run("should wrap sql query and apply limit", func(t *testing.T) { + args := InfluxQueryArgs{QueryType: SQLQueryType, Query: "SELECT * FROM my_table;", Limit: 10} + enforceQueryLimit(&args) + assert.Equal(t, "(SELECT * FROM my_table) LIMIT 10", args.Query, "sql query 
should be wrapped and limited") + t.Log("applied sql limit") + }) + + t.Run("should apply flux limit", func(t *testing.T) { + args := InfluxQueryArgs{QueryType: FluxQueryType, Query: "from(bucket: \"my-bucket\")", Limit: 20} + enforceQueryLimit(&args) + assert.Equal(t, "from(bucket: \"my-bucket\")\n|>limit(n:20)", args.Query, "flux query should be limited") + t.Log("applied flux limit") + }) + + t.Run("should replace influxql limit if exists", func(t *testing.T) { + args := InfluxQueryArgs{QueryType: InfluxQLQueryType, Query: "SELECT * FROM my_table LIMIT 100", Limit: 50} + enforceQueryLimit(&args) + assert.Equal(t, "SELECT * FROM my_table LIMIT 50", args.Query, "influxql limit should be replaced") + t.Log("applied influxql replaced limit") + }) + + t.Run("should apply default limit when no limit passed", func(t *testing.T) { + args := InfluxQueryArgs{QueryType: SQLQueryType, Query: "SELECT * FROM table", Limit: 0} + enforceQueryLimit(&args) + assert.Equal(t, "(SELECT * FROM table) LIMIT 100", args.Query, "sql query should use default limit") + t.Log("applied default sql limit") + }) + }) +} + +func Test_parseTimeRange(t *testing.T) { + t.Run("test_feature", func(t *testing.T) { + t.Run("should parse start and infer default end", func(t *testing.T) { + from, to, err := parseTimeRange("2026-02-02T19:00:00Z", "") + require.NoError(t, err, "should not have error") + assert.NotNil(t, from, "from time should not be nil") + assert.NotNil(t, to, "to time should not be nil") + expectedFrom, _ := time.Parse(time.RFC3339, "2026-02-02T19:00:00Z") + assert.Equal(t, expectedFrom, *from, "from time should match parsed start") + assert.Equal(t, expectedFrom.Add(time.Hour), *to, "to time should be 1 hour after from") + t.Log("parsed time range with start only") + }) + + t.Run("should parse start and end", func(t *testing.T) { + from, to, err := parseTimeRange("2026-02-02T19:00:00Z", "2026-02-02T20:00:00Z") + require.NoError(t, err, "should not have error") + expectedFrom, _ := 
time.Parse(time.RFC3339, "2026-02-02T19:00:00Z") + expectedTo, _ := time.Parse(time.RFC3339, "2026-02-02T20:00:00Z") + assert.Equal(t, expectedFrom, *from, "from time should match parsed start") + assert.Equal(t, expectedTo, *to, "to time should match parsed end") + t.Log("parsed time range with both start and end") + }) + + t.Run("should handle relative start times", func(t *testing.T) { + from, to, err := parseTimeRange("now-2h", "") + require.NoError(t, err, "should not have error parsing relative time") + assert.NotNil(t, from, "from time should not be nil") + assert.NotNil(t, to, "to time should not be nil") + t.Log("parsed relative time range") + }) + }) +} + +func Test_extractColValues(t *testing.T) { + t.Run("test_response", func(t *testing.T) { + t.Run("should extract values from valid response", func(t *testing.T) { + resp := &grafana.DSQueryResponse{ + Results: map[string]grafana.DsQueryResult{ + "A": { + Frames: []grafana.DsQueryFrame{ + { + Schema: struct { + Name string `json:"name,omitempty"` + RefID string `json:"refId,omitempty"` + Fields []grafana.DsQueryFrameField `json:"fields"` + }{ + Fields: []grafana.DsQueryFrameField{ + {Name: "my_col"}, + }, + }, + Data: struct { + Values [][]interface{} `json:"values"` + }{ + Values: [][]interface{}{ + {"val1", "val2"}, + }, + }, + }, + }, + }, + }, + } + values, err := extractColValues(resp, "my_col") + require.NoError(t, err, "should not have error") + assert.NotNil(t, values, "values should not be nil") + assert.Subset(t, *values, []string{"val1", "val2"}, "values should include extracted strings") + t.Log("extracted valid col values") + }) + + t.Run("should propagate error from result", func(t *testing.T) { + resp := &grafana.DSQueryResponse{ + Results: map[string]grafana.DsQueryResult{ + "A": { + Error: "some target error", + }, + }, + } + values, err := extractColValues(resp, "my_col") + assert.Error(t, err, "should have error") + assert.Equal(t, "some target error", err.Error(), "error message 
should match") + assert.Nil(t, values, "values should be nil") + t.Log("handled error property in result") + }) + }) +} + +func Test_parseQueryResponseFrames(t *testing.T) { + t.Run("test_response", func(t *testing.T) { + t.Run("should parse frames successfully", func(t *testing.T) { + field1 := grafana.DsQueryFrameField{Name: "time"} + field2 := grafana.DsQueryFrameField{Name: "_value"} + field2.Labels.Field = "temp" + + resp := &grafana.DSQueryResponse{ + Results: map[string]grafana.DsQueryResult{ + "A": { + Frames: []grafana.DsQueryFrame{ + { + Schema: struct { + Name string `json:"name,omitempty"` + RefID string `json:"refId,omitempty"` + Fields []grafana.DsQueryFrameField `json:"fields"` + }{ + Name: "test_frame", + Fields: []grafana.DsQueryFrameField{ + field1, + field2, + }, + }, + Data: struct { + Values [][]interface{} `json:"values"` + }{ + Values: [][]interface{}{ + {1000, 2000}, + {22.5, 23.0}, + }, + }, + }, + }, + }, + }, + } + frames, err := parseQueryResponseFrames(resp) + require.NoError(t, err, "should not have error") + require.Len(t, frames, 1, "should have 1 frame") + assert.Equal(t, "test_frame", frames[0].Name, "frame name should match") + assert.Subset(t, frames[0].Columns, []string{"time", "temp"}, "columns should be parsed and mapped") + assert.Equal(t, uint(2), frames[0].RowCount, "should have 2 rows") + t.Log("parsed frames successfully") + }) + + t.Run("should return error when results contain error", func(t *testing.T) { + resp := &grafana.DSQueryResponse{ + Results: map[string]grafana.DsQueryResult{ + "A": { + Error: "query failed", + }, + }, + } + frames, err := parseQueryResponseFrames(resp) + assert.Error(t, err, "should return error") + assert.Nil(t, frames, "frames should be nil") + t.Log("returned error correctly for failed query") + }) + + t.Run("should return error when no rows", func(t *testing.T) { + resp := &grafana.DSQueryResponse{ + Results: map[string]grafana.DsQueryResult{ + "A": { + Frames: []grafana.DsQueryFrame{ + { 
+ Schema: struct { + Name string `json:"name,omitempty"` + RefID string `json:"refId,omitempty"` + Fields []grafana.DsQueryFrameField `json:"fields"` + }{ + Name: "test_frame", + Fields: []grafana.DsQueryFrameField{ + {Name: "time"}, + }, + }, + Data: struct { + Values [][]interface{} `json:"values"` + }{ + Values: [][]interface{}{}, + }, + }, + }, + }, + }, + } + frames, err := parseQueryResponseFrames(resp) + assert.Error(t, err, "should return error when no rows exist") + assert.True(t, errors.Is(err, grafana.ErrNoRows), "error should be ErrNoRows") + assert.Len(t, frames, 0, "frames should be empty") + t.Log("returned no rows error correctly") + }) + }) +} From 04fc22a16ecb0ab6d1c7aecfb8924cee2efecbed Mon Sep 17 00:00:00 2001 From: Shaik-Sirajuddin Date: Mon, 23 Mar 2026 11:54:21 +0530 Subject: [PATCH 15/24] chore(tools): apply newline for influxql queries --- tools/influxdb.go | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/tools/influxdb.go b/tools/influxdb.go index 909bfb79..b17acc9b 100644 --- a/tools/influxdb.go +++ b/tools/influxdb.go @@ -29,7 +29,7 @@ const ( InfluxDBMeasurementsDefaultLimit uint = 100 // InfluxDBMeasurementsMaxLimit is the maximum limit applied when listing measurements. - InfluxDBMeasurementsMaxLimit uint = 1000 + InfluxDBMeasurementsMaxLimit uint = 1000 // InfluxDBTagsDefaultLimit is the default limit applied to fields and tags. InfluxDBTagsDefaultLimit uint = 100 @@ -121,6 +121,7 @@ type InfluxQueryResFrame struct { Rows []map[string]any `json:"rows"` RowCount uint `json:"rowCount"` } + // InfluxQueryResult contains the parsed results of an InfluxDB query. 
type InfluxQueryResult struct { Frames []*InfluxQueryResFrame @@ -315,7 +316,7 @@ func parseQueryResponseFrames(resp *grafana.DSQueryResponse) ([]*InfluxQueryResF return nil, fmt.Errorf("query error (refId=%s): %s", refID, r.Error) } - clonedFrames := make([]*InfluxQueryResFrame, 0, len(frames)+len(r.Frames)) + clonedFrames := make([]*InfluxQueryResFrame, len(frames), len(frames)+len(r.Frames)) copy(clonedFrames, frames) frames = clonedFrames @@ -584,7 +585,9 @@ func listMeasurements(ctx context.Context, args ListMeasurementsArgs) (*ListMeas query = fmt.Sprintf("SELECT table_name FROM information_schema.tables WHERE table_schema = 'iox' ORDER BY table_name LIMIT %d", args.Limit) colKey = "table_name" case FluxQueryType: - query = fmt.Sprintf(`import "influxdata/influxdb/schema" schema.measurements(bucket: %s)|> limit(n: %d)`, + query = fmt.Sprintf( + `import "influxdata/influxdb/schema" + schema.measurements(bucket: %s)|> limit(n: %d)`, quoteStringAsFluxLiteral(args.Bucket), args.Limit) colKey = "_value" case InfluxQLQueryType: @@ -667,10 +670,8 @@ func quoteStringAsFluxLiteral(s string) string { } func quoteStringAsInfluxQLLiteral(s string) string { - // InfluxQL style: double quotes, escape backslash then double quotes - s = strings.ReplaceAll(s, `\`, `\\`) - s = strings.ReplaceAll(s, `"`, `\"`) - return `"` + s + `"` + // InfluxQL identical as Flux + return quoteStringAsFluxLiteral(s) } func listTagKeys(ctx context.Context, args ListTagKeysArgs) (*ListTagKeysResult, error) { @@ -696,7 +697,9 @@ func listTagKeys(ctx context.Context, args ListTagKeysArgs) (*ListTagKeysResult, quoteStringAsLiteral(args.Measurement), args.Limit) tagColumnKey = "column_name" case FluxQueryType: - query = fmt.Sprintf(`import "influxdata/influxdb/schema" schema.measurementTagKeys(bucket: %s, measurement: %s)|> limit(n: %d)`, + query = fmt.Sprintf( + `import "influxdata/influxdb/schema" + schema.measurementTagKeys(bucket: %s, measurement: %s)|> limit(n: %d)`, 
quoteStringAsFluxLiteral(args.Bucket), quoteStringAsFluxLiteral(args.Measurement), args.Limit) tagColumnKey = "_value" case InfluxQLQueryType: @@ -792,7 +795,9 @@ func listFieldKeys(ctx context.Context, args ListFieldKeysArgs) (*ListFieldKeysR quoteStringAsLiteral(args.Measurement), args.Limit) fieldColumnKey = "column_name" case FluxQueryType: - query = fmt.Sprintf(`import "influxdata/influxdb/schema" schema.measurementFieldKeys(bucket: %s, measurement: %s)|> limit(n: %d)`, + query = fmt.Sprintf( + `import "influxdata/influxdb/schema" + schema.measurementFieldKeys(bucket: %s, measurement: %s)|> limit(n: %d)`, quoteStringAsFluxLiteral(args.Bucket), quoteStringAsFluxLiteral(args.Measurement), args.Limit) fieldColumnKey = "_value" case InfluxQLQueryType: From d3e4e3a75e6de0c5d65d41ccd09b195abaac6310 Mon Sep 17 00:00:00 2001 From: Shaik-Sirajuddin Date: Mon, 23 Mar 2026 11:57:19 +0530 Subject: [PATCH 16/24] lint(tools): fix lint errors --- tools/influxdb.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/influxdb.go b/tools/influxdb.go index b17acc9b..95ad65e3 100644 --- a/tools/influxdb.go +++ b/tools/influxdb.go @@ -497,7 +497,7 @@ func listBuckets(ctx context.Context, args ListBucketArgs) (*ListBucketResult, e if err != nil { if sourceQueryType != "" && sourceQueryType != queryType { - return nil, fmt.Errorf("Datasource is not configured with FluxQL, bucket listing is explicit to FluxQL linked datasources") + return nil, fmt.Errorf("datasource is not configured with FluxQL, bucket listing is explicit to FluxQL linked datasources") } return nil, err } @@ -528,7 +528,7 @@ func listBuckets(ctx context.Context, args ListBucketArgs) (*ListBucketResult, e ProcessedQuery: query, StartTime: refTime, EndTime: refTime, - Error: fmt.Errorf("Empty results, check if buckets exist for connected datasources"), + Error: fmt.Errorf("empty results, check if buckets exist for connected datasources"), }) } @@ -575,7 +575,7 @@ func 
listMeasurements(ctx context.Context, args ListMeasurementsArgs) (*ListMeas enforceMeasurementsLimit(&args) if queryType == FluxQueryType && args.Bucket == "" { - return nil, fmt.Errorf("Bucket is required for %s linked InfluxDB Datasources", FluxQueryType) + return nil, fmt.Errorf("bucket is required for %s linked InfluxDB Datasources", FluxQueryType) } var query string //represents column key of measurement in response @@ -638,7 +638,7 @@ var ListMeasurements = mcpgrafana.MustTool( type ListTagKeysArgs struct { DatasourceUID string `json:"datasourceUid" jsonschema:"required,description=The UID of the InfluxDB datasource. Use list_datasources to find available UIDs."` - Bucket string `json:"bucket,omitempty" jsonschema:"optional,description=Bucket Name of target bucket to fetch from,required only for FluxQL linked datasources."` + Bucket string `json:"bucket,omitempty" jsonschema:"optional,description=Bucket Name of target bucket to fetch from\\,required only for FluxQL linked datasources."` Measurement string `json:"measurement" jsonschema:"required,description=Filter by measurement"` Limit uint `json:"limit"` } @@ -751,7 +751,7 @@ var ListTagKeys = mcpgrafana.MustTool( type ListFieldKeysArgs struct { DatasourceUID string `json:"datasourceUid" jsonschema:"required,description=The UID of the InfluxDB datasource. 
Use list_datasources to find available UIDs."` - Bucket string `json:"bucket,omitempty" jsonschema:"optional,description=Bucket Name of target bucket to fetch from,required only for FluxQL linked datasources."` + Bucket string `json:"bucket,omitempty" jsonschema:"optional,description=Bucket Name of target bucket to fetch from\\,required only for FluxQL linked datasources."` Measurement string `json:"measurement" jsonschema:"required,description=Filter by measurement"` Limit uint `json:"limit"` } From 0c2a00b41d069fd9889480dc6d5db7b448e4bca0 Mon Sep 17 00:00:00 2001 From: Shaik-Sirajuddin Date: Mon, 23 Mar 2026 11:59:58 +0530 Subject: [PATCH 17/24] lint(tools): fix lint errors --- tools/influxdb.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/influxdb.go b/tools/influxdb.go index 95ad65e3..3cd2c9c0 100644 --- a/tools/influxdb.go +++ b/tools/influxdb.go @@ -618,7 +618,7 @@ func listMeasurements(ctx context.Context, args ListMeasurementsArgs) (*ListMeas ProcessedQuery: query, StartTime: refTime, EndTime: refTime, - Error: fmt.Errorf("No measurements found, verify at datasource"), + Error: fmt.Errorf("no measurements found, verify at datasource"), }) } @@ -684,7 +684,7 @@ func listTagKeys(ctx context.Context, args ListTagKeysArgs) (*ListTagKeysResult, } if queryType == FluxQueryType && args.Bucket == "" { - return nil, fmt.Errorf("Bucket is required for %s linked InfluxDB Datasources", FluxQueryType) + return nil, fmt.Errorf("bucket is required for %s linked InfluxDB datasources", FluxQueryType) } var tagColumnKey string @@ -731,7 +731,7 @@ func listTagKeys(ctx context.Context, args ListTagKeysArgs) (*ListTagKeysResult, ProcessedQuery: query, StartTime: refTime, EndTime: refTime, - Error: fmt.Errorf("No tags found, verify at datasource"), + Error: fmt.Errorf("no tags found, verify at datasource"), }) } @@ -782,7 +782,7 @@ func listFieldKeys(ctx context.Context, args ListFieldKeysArgs) (*ListFieldKeysR } if queryType == 
FluxQueryType && args.Bucket == "" { - return nil, fmt.Errorf("Bucket is required for %s linked InfluxDB Datasources", FluxQueryType) + return nil, fmt.Errorf("bucket is required for %s linked InfluxDB datasources", FluxQueryType) } var fieldColumnKey string @@ -829,7 +829,7 @@ func listFieldKeys(ctx context.Context, args ListFieldKeysArgs) (*ListFieldKeysR ProcessedQuery: query, StartTime: refTime, EndTime: refTime, - Error: fmt.Errorf("No fields found, verify at datasource"), + Error: fmt.Errorf("no fields found, verify at datasource"), }) } From 24650b14bc4450281d97121fbd960f3389f41b73 Mon Sep 17 00:00:00 2001 From: Shaik-Sirajuddin Date: Tue, 24 Mar 2026 10:25:10 +0530 Subject: [PATCH 18/24] refactor(tools): use dsquery response types from grafana package --- pkg/grafana/datasource.go | 24 ++++---- tools/influxdb.go | 58 +++++++++--------- tools/influxdb_integration_test.go | 12 ++-- tools/influxdb_unit_test.go | 4 +- tools/prom_backend_cloudmonitoring.go | 52 ++++------------ tools/prom_backend_cloudmonitoring_test.go | 69 +++++++++++----------- 6 files changed, 95 insertions(+), 124 deletions(-) diff --git a/pkg/grafana/datasource.go b/pkg/grafana/datasource.go index 919c2e93..0f721ac1 100644 --- a/pkg/grafana/datasource.go +++ b/pkg/grafana/datasource.go @@ -17,21 +17,23 @@ type DsQueryFrameField struct { TypeInfo struct { Frame string `json:"frame,omitempty"` } `json:"typeInfo,omitempty"` - Labels struct { - Field string `json:"_field,omitempty"` - } `json:"labels"` + Labels map[string]string `json:"labels"` Config map[string]interface{} `json:"config,omitempty"` } +type DsQueryFrameSchema struct { + Name string `json:"name,omitempty"` + RefID string `json:"refId,omitempty"` + Fields []DsQueryFrameField `json:"fields"` +} + +type DSQueryFrameData struct { + Values [][]interface{} `json:"values"` +} + type DsQueryFrame struct { - Schema struct { - Name string `json:"name,omitempty"` - RefID string `json:"refId,omitempty"` - Fields []DsQueryFrameField 
`json:"fields"` - } `json:"schema,omitempty"` - Data struct { - Values [][]interface{} `json:"values"` - } `json:"data"` + Schema DsQueryFrameSchema `json:"schema,omitempty"` + Data DSQueryFrameData `json:"data"` } type DsQueryResult struct { diff --git a/tools/influxdb.go b/tools/influxdb.go index 3cd2c9c0..da4bc58c 100644 --- a/tools/influxdb.go +++ b/tools/influxdb.go @@ -43,6 +43,10 @@ const ( InfluxQLQueryType = "InfluxQL" ) +// InfluxQL query supports limits in the format +// LIMIT %d | LIMIT %d OFFSET %d, Unsupported: LIMIT %d,%d +var limitRegEx = regexp.MustCompile(`(?i)(limit\s+)\d+(\s+offset\s+\d+)?(\s*$)`) + type influxDBClient struct { httpClient *http.Client baseURL string @@ -222,8 +226,8 @@ func (ic *influxDBClient) Query(ctx context.Context, args InfluxQueryArgs, from, } func enforceQueryLimit(args *InfluxQueryArgs) { - //flux, influxql limits per measurement(influxql), table(flux) level so number of measurements * limit is final records - //sql limit applies on final records level + // flux, influxql limits per measurement(influxql), table(flux) level so number of measurements * limit is final records + // sql limit applies on final records level limit := InfluxDBDefaultLimit @@ -235,26 +239,23 @@ func enforceQueryLimit(args *InfluxQueryArgs) { switch args.QueryType { case SQLQueryType: - //wrap query and apply limit + // wrap query and apply limit query := strings.TrimSuffix(args.Query, ";") args.Query = "(" + query + ")" + fmt.Sprintf(" LIMIT %d", limit) case InfluxQLQueryType: - //InfluxQL query supports limits in the format - //LIMIT %d | LIMIT %d OFFSET %d, Unsupported: LIMIT %d,%d (from manually testing queries) - limitWithOffset := regexp.MustCompile(`(?i)(limit\s+)\d+(\s+offset\s+\d+)?(\s*$)`) - - if limitWithOffset.Match([]byte(args.Query)) { - + // override limits when query contains limit + if limitRegEx.Match([]byte(args.Query)) { replacement := fmt.Sprintf("${1}%d${2}${3}", limit) - args.Query = 
limitWithOffset.ReplaceAllString(args.Query, replacement) - + args.Query = limitRegEx.ReplaceAllString(args.Query, replacement) } else { - args.Query = args.Query + fmt.Sprintf(" LIMIT %d", limit) + // append limit in other cases + query := strings.TrimSuffix(args.Query, ";") + args.Query = query + fmt.Sprintf(" LIMIT %d", limit) } case FluxQueryType: - //A query can execute selection of multiple tables - //flux |>limit() operator applies limit per table or group + // A query can execute selection of multiple tables + // flux |>limit() operator applies limit per table or group args.Query = strings.TrimSpace(args.Query) + fmt.Sprintf("\n|>limit(n:%d)", limit) } @@ -277,7 +278,7 @@ func parseTimeRange(start string, end string) (*time.Time, *time.Time, error) { fromTime = parsed } - //set relative end time 1hour from start + // set relative end time 1hour from start if end == "" { toTime = fromTime.Add(defaultPeriod) } @@ -304,7 +305,6 @@ func parseTimeRange(start string, end string) (*time.Time, *time.Time, error) { // parseQueryResponseFrames parses ds/query response in a json key-pair format // returns list of frames combined of query results // treats empty results as an error - func parseQueryResponseFrames(resp *grafana.DSQueryResponse) ([]*InfluxQueryResFrame, error) { frames := make([]*InfluxQueryResFrame, 0) hasResults := false @@ -324,7 +324,7 @@ func parseQueryResponseFrames(resp *grafana.DSQueryResponse) ([]*InfluxQueryResF noOfCol := len(frame.Schema.Fields) if noOfCol == 0 { - //columns not found for frame, skip frame + // columns not found for frame, skip frame continue } @@ -335,7 +335,7 @@ func parseQueryResponseFrames(resp *grafana.DSQueryResponse) ([]*InfluxQueryResF continue } - //Number of rows count derived from count of values of first column + // Number of rows count derived from count of values of first column rowCount := (len(frame.Data.Values[0])) resFrame.RowCount = uint(rowCount) resFrame.Rows = make([]map[string]any, 0, rowCount) @@ 
-345,11 +345,11 @@ func parseQueryResponseFrames(resp *grafana.DSQueryResponse) ([]*InfluxQueryResF fieldName := field.Name - if field.Labels.Field != "" && field.Name == "_value" { - //use field name for column values of flux queries - fieldName = field.Labels.Field + if field.Labels["_field"] != "" && field.Name == "_value" { + // use field name for column values of flux queries + fieldName = field.Labels["_field"] } - //influxql query with 'time_series' format query + // influxql query with 'time_series' format query if field.Config != nil { if displayName, ok := field.Config["displayNameFromDS"].(string); ok && displayName != "" { fieldName = displayName @@ -466,7 +466,7 @@ func extractColValues(resp *grafana.DSQueryResponse, colName string) (*[]string, } if fieldColIdx == -1 { - //no bucket name col found + // no bucket name col found continue } @@ -521,7 +521,7 @@ func listBuckets(ctx context.Context, args ListBucketArgs) (*ListBucketResult, e result := ListBucketResult{} if len(*buckets) == 0 { - //return empty result hints + // return empty result hints result.Hints = GenerateEmptyResultHints(HintContext{ DatasourceType: InfluxDBDataSourceType, Query: query, @@ -578,7 +578,7 @@ func listMeasurements(ctx context.Context, args ListMeasurementsArgs) (*ListMeas return nil, fmt.Errorf("bucket is required for %s linked InfluxDB Datasources", FluxQueryType) } var query string - //represents column key of measurement in response + // represents column key of measurement in response var colKey string switch queryType { case SQLQueryType: @@ -611,7 +611,7 @@ func listMeasurements(ctx context.Context, args ListMeasurementsArgs) (*ListMeas result := ListMeasurementResult{} if len(*measurements) == 0 { - //add empty results hints + // add empty results hints result.Hints = GenerateEmptyResultHints(HintContext{ DatasourceType: InfluxDBDataSourceType, Query: query, @@ -724,7 +724,7 @@ func listTagKeys(ctx context.Context, args ListTagKeysArgs) (*ListTagKeysResult, result 
:= ListTagKeysResult{} if len(*tags) == 0 { - //add empty results hints + // add empty results hints result.Hints = GenerateEmptyResultHints(HintContext{ DatasourceType: InfluxDBDataSourceType, Query: query, @@ -790,7 +790,7 @@ func listFieldKeys(ctx context.Context, args ListFieldKeysArgs) (*ListFieldKeysR switch queryType { case SQLQueryType: - //data_type 'Dictionary%%' distinguishes tags from fields for SQL QUERIES + // data_type 'Dictionary%%' distinguishes tags from fields for SQL QUERIES query = fmt.Sprintf("SELECT column_name FROM information_schema.columns WHERE table_schema = 'iox' AND table_name = %s AND data_type NOT LIKE 'Dictionary%%' ORDER BY column_name LIMIT %d", quoteStringAsLiteral(args.Measurement), args.Limit) fieldColumnKey = "column_name" @@ -822,7 +822,7 @@ func listFieldKeys(ctx context.Context, args ListFieldKeysArgs) (*ListFieldKeysR result := ListFieldKeysResult{} if len(*fieldKeys) == 0 { - //add empty results hints + // add empty results hints result.Hints = GenerateEmptyResultHints(HintContext{ DatasourceType: InfluxDBDataSourceType, Query: query, diff --git a/tools/influxdb_integration_test.go b/tools/influxdb_integration_test.go index 519733b1..7bc004b4 100644 --- a/tools/influxdb_integration_test.go +++ b/tools/influxdb_integration_test.go @@ -28,7 +28,7 @@ func Test_ListBuckets(t *testing.T) { _, err := listBuckets(ctx, ListBucketArgs{ DatasourceUID: "influxdb-sql", }) - require.EqualError(t, err, "Datasource is not configured with FluxQL, bucket listing is explicit to FluxQL linked datasources") + require.EqualError(t, err, "datasource is not configured with FluxQL, bucket listing is explicit to FluxQL linked datasources") }) t.Run("error for InfluxQL linked Datasource", func(t *testing.T) { @@ -36,7 +36,7 @@ func Test_ListBuckets(t *testing.T) { _, err := listBuckets(ctx, ListBucketArgs{ DatasourceUID: "influxdb-influxql", }) - require.EqualError(t, err, "Datasource is not configured with FluxQL, bucket listing is explicit to 
FluxQL linked datasources") + require.EqualError(t, err, "datasource is not configured with FluxQL, bucket listing is explicit to FluxQL linked datasources") }) } // Test_Query verifies querying InfluxDB with Flux, SQL and InfluxQL query languages. @@ -131,7 +131,7 @@ func Test_ListMeasurements(t *testing.T) { _, err := listMeasurements(ctx, ListMeasurementsArgs{ DatasourceUID: "influxdb-flux", }) - require.EqualError(t, err, fmt.Sprintf("Bucket is required for %s linked InfluxDB Datasources", FluxQueryType)) + require.EqualError(t, err, fmt.Sprintf("bucket is required for %s linked InfluxDB Datasources", FluxQueryType)) }) t.Run("bucket optional for SQL/InfluxQL Datasource", func(t *testing.T) { @@ -174,7 +174,7 @@ func Test_ListTagKeys(t *testing.T) { DatasourceUID: "influxdb-flux", Measurement: "auth_events", }) - require.EqualError(t, err, fmt.Sprintf("Bucket is required for %s linked InfluxDB Datasources", FluxQueryType)) + require.EqualError(t, err, fmt.Sprintf("bucket is required for %s linked InfluxDB Datasources", FluxQueryType)) }) t.Run("list tags keys", func(t *testing.T) { @@ -234,7 +234,7 @@ func Test_ListFieldKeys(t *testing.T) { DatasourceUID: "influxdb-flux", Measurement: "auth_events", }) - require.EqualError(t, err, fmt.Sprintf("Bucket is required for %s linked InfluxDB Datasources", FluxQueryType)) + require.EqualError(t, err, fmt.Sprintf("bucket is required for %s linked InfluxDB Datasources", FluxQueryType)) }) t.Run("list field keys", func(t *testing.T) { @@ -386,7 +386,7 @@ func Test_Limit(t *testing.T) { |> set(key: "tag", value: "bar") |> group(columns: ["tag"]) - union(tables: [t1, t2])//user comments should not bypass limits + union(tables: [t1, t2])// user comments should not bypass limits `, DatasourceUID: "influxdb-flux", QueryType: FluxQueryType, diff --git a/tools/influxdb_unit_test.go b/tools/influxdb_unit_test.go index 17c5b948..226ee6c8 100644 --- a/tools/influxdb_unit_test.go +++ b/tools/influxdb_unit_test.go @@ -207,8 +207,8 @@ 
func Test_parseQueryResponseFrames(t *testing.T) { t.Run("test_response", func(t *testing.T) { t.Run("should parse frames successfully", func(t *testing.T) { field1 := grafana.DsQueryFrameField{Name: "time"} - field2 := grafana.DsQueryFrameField{Name: "_value"} - field2.Labels.Field = "temp" + field2 := grafana.DsQueryFrameField{Name: "_value", Labels: make(map[string]string)} + field2.Labels["_field"] = "temp" resp := &grafana.DSQueryResponse{ Results: map[string]grafana.DsQueryResult{ diff --git a/tools/prom_backend_cloudmonitoring.go b/tools/prom_backend_cloudmonitoring.go index 7645a3b3..93fa88c3 100644 --- a/tools/prom_backend_cloudmonitoring.go +++ b/tools/prom_backend_cloudmonitoring.go @@ -11,8 +11,9 @@ import ( "strings" "time" - mcpgrafana "github.com/grafana/mcp-grafana" "github.com/grafana/grafana-openapi-client-go/models" + mcpgrafana "github.com/grafana/mcp-grafana" + "github.com/grafana/mcp-grafana/pkg/grafana" promv1 "github.com/prometheus/client_golang/api/prometheus/v1" "github.com/prometheus/common/model" ) @@ -273,7 +274,7 @@ func (b *cloudMonitoringBackend) labelValuesViaQuery(ctx context.Context, labelN } // doDSQuery executes a request against Grafana's /api/ds/query endpoint. 
-func (b *cloudMonitoringBackend) doDSQuery(ctx context.Context, payload map[string]interface{}) (*dsQueryResponse, error) { +func (b *cloudMonitoringBackend) doDSQuery(ctx context.Context, payload map[string]interface{}) (*grafana.DSQueryResponse, error) { payloadBytes, err := json.Marshal(payload) if err != nil { return nil, fmt.Errorf("marshaling query payload: %w", err) @@ -300,7 +301,7 @@ func (b *cloudMonitoringBackend) doDSQuery(ctx context.Context, payload map[stri return nil, fmt.Errorf("query returned status %d: %s", resp.StatusCode, string(body[:min(len(body), 1024)])) } - var queryResp dsQueryResponse + var queryResp grafana.DSQueryResponse if err := json.Unmarshal(body, &queryResp); err != nil { return nil, fmt.Errorf("unmarshaling response: %w", err) } @@ -359,43 +360,10 @@ type gcpMetricDescriptor struct { ServiceShortName string `json:"serviceShortName,omitempty"` } -// --- /api/ds/query response types --- - -type dsQueryResponse struct { - Results map[string]dsQueryResult `json:"results"` -} - -type dsQueryResult struct { - Status int `json:"status,omitempty"` - Frames []dsQueryFrame `json:"frames,omitempty"` - Error string `json:"error,omitempty"` -} - -type dsQueryFrame struct { - Schema dsQueryFrameSchema `json:"schema"` - Data dsQueryFrameData `json:"data"` -} - -type dsQueryFrameSchema struct { - Name string `json:"name,omitempty"` - RefID string `json:"refId,omitempty"` - Fields []dsQueryFrameField `json:"fields"` -} - -type dsQueryFrameField struct { - Name string `json:"name"` - Type string `json:"type"` - Labels map[string]string `json:"labels,omitempty"` -} - -type dsQueryFrameData struct { - Values [][]interface{} `json:"values"` -} - // --- Frame conversion --- // framesToPrometheusValue converts /api/ds/query response frames to Prometheus model values. 
-func framesToPrometheusValue(resp *dsQueryResponse, queryType string) (model.Value, error) { +func framesToPrometheusValue(resp *grafana.DSQueryResponse, queryType string) (model.Value, error) { r, ok := resp.Results["A"] if !ok { if queryType == "instant" { @@ -414,7 +382,7 @@ func framesToPrometheusValue(resp *dsQueryResponse, queryType string) (model.Val return framesToMatrix(r.Frames) } -func framesToMatrix(frames []dsQueryFrame) (model.Matrix, error) { +func framesToMatrix(frames []grafana.DsQueryFrame) (model.Matrix, error) { var matrix model.Matrix for _, frame := range frames { timeIdx, valueIdx := findTimeAndValueFields(frame.Schema.Fields) @@ -457,7 +425,7 @@ func framesToMatrix(frames []dsQueryFrame) (model.Matrix, error) { return matrix, nil } -func framesToVector(frames []dsQueryFrame) (model.Vector, error) { +func framesToVector(frames []grafana.DsQueryFrame) (model.Vector, error) { var vector model.Vector for _, frame := range frames { timeIdx, valueIdx := findTimeAndValueFields(frame.Schema.Fields) @@ -497,7 +465,7 @@ func framesToVector(frames []dsQueryFrame) (model.Vector, error) { return vector, nil } -func findTimeAndValueFields(fields []dsQueryFrameField) (timeIdx, valueIdx int) { +func findTimeAndValueFields(fields []grafana.DsQueryFrameField) (timeIdx, valueIdx int) { timeIdx = -1 valueIdx = -1 for i, f := range fields { @@ -549,7 +517,7 @@ func toFloat(v interface{}) (float64, bool) { } // extractLabelNamesFromFrames extracts unique label keys from HEADERS query frames. -func extractLabelNamesFromFrames(resp *dsQueryResponse) []string { +func extractLabelNamesFromFrames(resp *grafana.DSQueryResponse) []string { seen := make(map[string]bool) r, ok := resp.Results["A"] if !ok { @@ -574,7 +542,7 @@ func extractLabelNamesFromFrames(resp *dsQueryResponse) []string { } // extractLabelValuesFromFrames extracts unique values for a label from HEADERS query frames. 
-func extractLabelValuesFromFrames(resp *dsQueryResponse, labelName string) []string { +func extractLabelValuesFromFrames(resp *grafana.DSQueryResponse, labelName string) []string { seen := make(map[string]bool) r, ok := resp.Results["A"] if !ok { diff --git a/tools/prom_backend_cloudmonitoring_test.go b/tools/prom_backend_cloudmonitoring_test.go index 0b05ffd7..431aaab8 100644 --- a/tools/prom_backend_cloudmonitoring_test.go +++ b/tools/prom_backend_cloudmonitoring_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/grafana/grafana-openapi-client-go/models" + "github.com/grafana/mcp-grafana/pkg/grafana" promv1 "github.com/prometheus/client_golang/api/prometheus/v1" "github.com/prometheus/common/model" "github.com/stretchr/testify/assert" @@ -14,16 +15,16 @@ import ( func TestFramesToMatrix(t *testing.T) { t.Run("single series", func(t *testing.T) { - frames := []dsQueryFrame{ + frames := []grafana.DsQueryFrame{ { - Schema: dsQueryFrameSchema{ + Schema: grafana.DsQueryFrameSchema{ Name: "cpu_usage", - Fields: []dsQueryFrameField{ + Fields: []grafana.DsQueryFrameField{ {Name: "Time", Type: "time"}, {Name: "Value", Type: "number", Labels: map[string]string{"host": "a"}}, }, }, - Data: dsQueryFrameData{ + Data: grafana.DSQueryFrameData{ Values: [][]interface{}{ {float64(1000), float64(2000), float64(3000)}, {float64(0.5), float64(0.7), float64(0.9)}, @@ -43,20 +44,20 @@ func TestFramesToMatrix(t *testing.T) { }) t.Run("multiple series", func(t *testing.T) { - frames := []dsQueryFrame{ + frames := []grafana.DsQueryFrame{ { - Schema: dsQueryFrameSchema{ + Schema: grafana.DsQueryFrameSchema{ Name: "cpu", - Fields: []dsQueryFrameField{{Name: "Time", Type: "time"}, {Name: "Value", Type: "number", Labels: map[string]string{"host": "a"}}}, + Fields: []grafana.DsQueryFrameField{{Name: "Time", Type: "time"}, {Name: "Value", Type: "number", Labels: map[string]string{"host": "a"}}}, }, - Data: dsQueryFrameData{Values: [][]interface{}{{float64(1000)}, {float64(0.5)}}}, + Data: 
grafana.DSQueryFrameData{Values: [][]interface{}{{float64(1000)}, {float64(0.5)}}}, }, { - Schema: dsQueryFrameSchema{ + Schema: grafana.DsQueryFrameSchema{ Name: "cpu", - Fields: []dsQueryFrameField{{Name: "Time", Type: "time"}, {Name: "Value", Type: "number", Labels: map[string]string{"host": "b"}}}, + Fields: []grafana.DsQueryFrameField{{Name: "Time", Type: "time"}, {Name: "Value", Type: "number", Labels: map[string]string{"host": "b"}}}, }, - Data: dsQueryFrameData{Values: [][]interface{}{{float64(1000)}, {float64(0.8)}}}, + Data: grafana.DSQueryFrameData{Values: [][]interface{}{{float64(1000)}, {float64(0.8)}}}, }, } @@ -72,12 +73,12 @@ func TestFramesToMatrix(t *testing.T) { }) t.Run("frame missing time field", func(t *testing.T) { - frames := []dsQueryFrame{ + frames := []grafana.DsQueryFrame{ { - Schema: dsQueryFrameSchema{ - Fields: []dsQueryFrameField{{Name: "Value", Type: "number"}}, + Schema: grafana.DsQueryFrameSchema{ + Fields: []grafana.DsQueryFrameField{{Name: "Value", Type: "number"}}, }, - Data: dsQueryFrameData{Values: [][]interface{}{{float64(1.0)}}}, + Data: grafana.DSQueryFrameData{Values: [][]interface{}{{float64(1.0)}}}, }, } @@ -89,16 +90,16 @@ func TestFramesToMatrix(t *testing.T) { func TestFramesToVector(t *testing.T) { t.Run("single sample", func(t *testing.T) { - frames := []dsQueryFrame{ + frames := []grafana.DsQueryFrame{ { - Schema: dsQueryFrameSchema{ + Schema: grafana.DsQueryFrameSchema{ Name: "up", - Fields: []dsQueryFrameField{ + Fields: []grafana.DsQueryFrameField{ {Name: "Time", Type: "time"}, {Name: "Value", Type: "number", Labels: map[string]string{"job": "prometheus"}}, }, }, - Data: dsQueryFrameData{ + Data: grafana.DSQueryFrameData{ Values: [][]interface{}{ {float64(5000)}, {float64(1.0)}, @@ -117,15 +118,15 @@ func TestFramesToVector(t *testing.T) { }) t.Run("takes last value from multi-point frame", func(t *testing.T) { - frames := []dsQueryFrame{ + frames := []grafana.DsQueryFrame{ { - Schema: dsQueryFrameSchema{ - 
Fields: []dsQueryFrameField{ + Schema: grafana.DsQueryFrameSchema{ + Fields: []grafana.DsQueryFrameField{ {Name: "Time", Type: "time"}, {Name: "Value", Type: "number"}, }, }, - Data: dsQueryFrameData{ + Data: grafana.DSQueryFrameData{ Values: [][]interface{}{ {float64(1000), float64(2000), float64(3000)}, {float64(1.0), float64(2.0), float64(3.0)}, @@ -150,7 +151,7 @@ func TestFramesToVector(t *testing.T) { func TestFramesToPrometheusValue(t *testing.T) { t.Run("missing refId returns empty", func(t *testing.T) { - resp := &dsQueryResponse{Results: map[string]dsQueryResult{}} + resp := &grafana.DSQueryResponse{Results: map[string]grafana.DsQueryResult{}} v, err := framesToPrometheusValue(resp, "range") require.NoError(t, err) assert.Equal(t, model.Matrix{}, v) @@ -161,7 +162,7 @@ func TestFramesToPrometheusValue(t *testing.T) { }) t.Run("error in result", func(t *testing.T) { - resp := &dsQueryResponse{Results: map[string]dsQueryResult{ + resp := &grafana.DSQueryResponse{Results: map[string]grafana.DsQueryResult{ "A": {Error: "something went wrong"}, }} _, err := framesToPrometheusValue(resp, "range") @@ -234,29 +235,29 @@ func TestMapGCPMetricKind(t *testing.T) { } func TestExtractLabelValuesFromFrames(t *testing.T) { - resp := &dsQueryResponse{ - Results: map[string]dsQueryResult{ + resp := &grafana.DSQueryResponse{ + Results: map[string]grafana.DsQueryResult{ "A": { - Frames: []dsQueryFrame{ + Frames: []grafana.DsQueryFrame{ { - Schema: dsQueryFrameSchema{ - Fields: []dsQueryFrameField{ + Schema: grafana.DsQueryFrameSchema{ + Fields: []grafana.DsQueryFrameField{ {Name: "Time", Type: "time"}, {Name: "Value", Type: "number", Labels: map[string]string{"zone": "us-east1-b", "project_id": "my-project"}}, }, }, }, { - Schema: dsQueryFrameSchema{ - Fields: []dsQueryFrameField{ + Schema: grafana.DsQueryFrameSchema{ + Fields: []grafana.DsQueryFrameField{ {Name: "Time", Type: "time"}, {Name: "Value", Type: "number", Labels: map[string]string{"zone": "us-west1-a", 
"project_id": "my-project"}}, }, }, }, { - Schema: dsQueryFrameSchema{ - Fields: []dsQueryFrameField{ + Schema: grafana.DsQueryFrameSchema{ + Fields: []grafana.DsQueryFrameField{ {Name: "Time", Type: "time"}, {Name: "Value", Type: "number", Labels: map[string]string{"zone": "us-east1-b", "project_id": "other-project"}}, }, From 18bd52bb0fa10f3615da1175225dcbef1e3b6d30 Mon Sep 17 00:00:00 2001 From: Shaik-Sirajuddin Date: Tue, 24 Mar 2026 11:38:39 +0530 Subject: [PATCH 19/24] refactor: rename fluxql to flux in influxdb tool descriptions and tests --- tools/influxdb.go | 16 ++++++++-------- tools/influxdb_integration_test.go | 14 +++++++------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/tools/influxdb.go b/tools/influxdb.go index da4bc58c..3b2b40db 100644 --- a/tools/influxdb.go +++ b/tools/influxdb.go @@ -497,7 +497,7 @@ func listBuckets(ctx context.Context, args ListBucketArgs) (*ListBucketResult, e if err != nil { if sourceQueryType != "" && sourceQueryType != queryType { - return nil, fmt.Errorf("datasource is not configured with FluxQL, bucket listing is explicit to FluxQL linked datasources") + return nil, fmt.Errorf("datasource is not configured with Flux, bucket listing is explicit to Flux linked datasources") } return nil, err } @@ -539,7 +539,7 @@ func listBuckets(ctx context.Context, args ListBucketArgs) (*ListBucketResult, e var ListBucketsInflux = mcpgrafana.MustTool( "list_influxdb_buckets", - "Lists buckets of an InfluxDB datasource identified by its UID. Requires the datasource to be configured with FluxQL. Use in order: list_datasources -> get_datasource -> list_buckets_influxdb", + "Lists buckets of an InfluxDB datasource identified by its UID. Requires the datasource to be configured with Flux. 
Use in order: list_datasources -> get_datasource -> list_buckets_influxdb", listBuckets, mcp.WithTitleAnnotation("List Buckets InfluxDB"), mcp.WithIdempotentHintAnnotation(true), @@ -548,7 +548,7 @@ var ListBucketsInflux = mcpgrafana.MustTool( type ListMeasurementsArgs struct { DatasourceUID string `json:"datasourceUid" jsonschema:"required,description=The UID of the InfluxDB datasource. Use list_datasources to find available UIDs."` - Bucket string `json:"bucket,omitempty" jsonschema:"optional,description=Bucket Name of target bucket to fetch from; required only for FluxQL linked datasources."` + Bucket string `json:"bucket,omitempty" jsonschema:"optional,description=Bucket Name of target bucket to fetch from; required only for Flux linked datasources."` Limit uint `json:"limit"` } @@ -629,7 +629,7 @@ func listMeasurements(ctx context.Context, args ListMeasurementsArgs) (*ListMeas var ListMeasurements = mcpgrafana.MustTool( "list_measurements_influxdb", - "Lists Measurements of an InfluxDB datasource identified by its UID. Use in order: list_datasources -> get_datasource -> list_buckets_influxdb (required only for FluxQL linked datasource) -> list_measurements_influxdb", + "Lists Measurements of an InfluxDB datasource identified by its UID. Use in order: list_datasources -> get_datasource -> list_buckets_influxdb (required only for Flux linked datasource) -> list_measurements_influxdb", listMeasurements, mcp.WithTitleAnnotation("List Measurements InfluxDB"), mcp.WithIdempotentHintAnnotation(true), @@ -638,7 +638,7 @@ var ListMeasurements = mcpgrafana.MustTool( type ListTagKeysArgs struct { DatasourceUID string `json:"datasourceUid" jsonschema:"required,description=The UID of the InfluxDB datasource. 
Use list_datasources to find available UIDs."` - Bucket string `json:"bucket,omitempty" jsonschema:"optional,description=Bucket Name of target bucket to fetch from\\,required only for FluxQL linked datasources."` + Bucket string `json:"bucket,omitempty" jsonschema:"optional,description=Bucket Name of target bucket to fetch from\\,required only for Flux linked datasources."` Measurement string `json:"measurement" jsonschema:"required,description=Filter by measurement"` Limit uint `json:"limit"` } @@ -742,7 +742,7 @@ func listTagKeys(ctx context.Context, args ListTagKeysArgs) (*ListTagKeysResult, var ListTagKeys = mcpgrafana.MustTool( "list_tag_keys_influxdb", - "Lists Tag Keys of an InfluxDB datasource identified by its UID. Use in order: list_datasources -> get_datasource -> list_buckets_influxdb (required only for FluxQL linked datasource) -> list_measurements_influxdb -> list_tag_keys_influxdb", + "Lists Tag Keys of an InfluxDB datasource identified by its UID. Use in order: list_datasources -> get_datasource -> list_buckets_influxdb (required only for Flux linked datasource) -> list_measurements_influxdb -> list_tag_keys_influxdb", listTagKeys, mcp.WithTitleAnnotation("List Tag Keys InfluxDB"), mcp.WithIdempotentHintAnnotation(true), @@ -751,7 +751,7 @@ var ListTagKeys = mcpgrafana.MustTool( type ListFieldKeysArgs struct { DatasourceUID string `json:"datasourceUid" jsonschema:"required,description=The UID of the InfluxDB datasource. 
Use list_datasources to find available UIDs."` - Bucket string `json:"bucket,omitempty" jsonschema:"optional,description=Bucket Name of target bucket to fetch from\\,required only for FluxQL linked datasources."` + Bucket string `json:"bucket,omitempty" jsonschema:"optional,description=Bucket Name of target bucket to fetch from\\,required only for Flux linked datasources."` Measurement string `json:"measurement" jsonschema:"required,description=Filter by measurement"` Limit uint `json:"limit"` } @@ -840,7 +840,7 @@ func listFieldKeys(ctx context.Context, args ListFieldKeysArgs) (*ListFieldKeysR var ListFieldKeys = mcpgrafana.MustTool( "list_field_keys_influxdb", - "Lists Field Keys of an InfluxDB datasource identified by its UID. Use in order: list_datasources -> get_datasource -> list_buckets_influxdb (required only for FluxQL linked datasource) -> list_measurements_influxdb -> list_field_keys_influxdb", + "Lists Field Keys of an InfluxDB datasource identified by its UID. Use in order: list_datasources -> get_datasource -> list_buckets_influxdb (required only for Flux linked datasource) -> list_measurements_influxdb -> list_field_keys_influxdb", listFieldKeys, mcp.WithTitleAnnotation("List Field Keys InfluxDB"), mcp.WithIdempotentHintAnnotation(true), diff --git a/tools/influxdb_integration_test.go b/tools/influxdb_integration_test.go index 7bc004b4..6cad70e2 100644 --- a/tools/influxdb_integration_test.go +++ b/tools/influxdb_integration_test.go @@ -12,7 +12,7 @@ import ( // Test_ListBuckets verifies the listing of buckets for different InfluxDB datasource linked types. 
func Test_ListBuckets(t *testing.T) { - t.Run("list buckets for FluxQL linked DataSource", func(t *testing.T) { + t.Run("list buckets for Flux linked DataSource", func(t *testing.T) { ctx := newTestContext() result, err := listBuckets(ctx, ListBucketArgs{ @@ -20,7 +20,7 @@ func Test_ListBuckets(t *testing.T) { }) require.NoError(t, err) - assert.Contains(t, *result.Buckets, "b-system-logs", "should list buckets for FluxQL DataSource") + assert.Contains(t, *result.Buckets, "b-system-logs", "should list buckets for Flux DataSource") }) t.Run("error for SQL linked Datasource", func(t *testing.T) { @@ -28,7 +28,7 @@ func Test_ListBuckets(t *testing.T) { _, err := listBuckets(ctx, ListBucketArgs{ DatasourceUID: "influxdb-sql", }) - require.EqualError(t, err, "datasource is not configured with FluxQL, bucket listing is explicit to FluxQL linked datasources") + require.EqualError(t, err, "datasource is not configured with Flux, bucket listing is explicit to Flux linked datasources") }) t.Run("error for InfluxQL linked Datasource", func(t *testing.T) { @@ -36,7 +36,7 @@ func Test_ListBuckets(t *testing.T) { _, err := listBuckets(ctx, ListBucketArgs{ DatasourceUID: "influxdb-influxql", }) - require.EqualError(t, err, "datasource is not configured with FluxQL, bucket listing is explicit to FluxQL linked datasources") + require.EqualError(t, err, "datasource is not configured with Flux, bucket listing is explicit to Flux linked datasources") }) } // Test_Query verifies querying InfluxDB with Flux, SQL and InfluxQL query languages. @@ -126,7 +126,7 @@ func Test_Query(t *testing.T) { } // Test_ListMeasurements verifies the listing of measurements for different InfluxDB datasource linked types. 
func Test_ListMeasurements(t *testing.T) { - t.Run("require bucket for FluxQL Datasource", func(t *testing.T) { + t.Run("require bucket for Flux Datasource", func(t *testing.T) { ctx := newTestContext() _, err := listMeasurements(ctx, ListMeasurementsArgs{ DatasourceUID: "influxdb-flux", @@ -168,7 +168,7 @@ func Test_ListMeasurements(t *testing.T) { // Test_ListTagKeys verifies the listing of tag keys for different InfluxDB datasource linked types. func Test_ListTagKeys(t *testing.T) { - t.Run("require bucket for FluxQL Datasource", func(t *testing.T) { + t.Run("require bucket for Flux Datasource", func(t *testing.T) { ctx := newTestContext() _, err := listTagKeys(ctx, ListTagKeysArgs{ DatasourceUID: "influxdb-flux", @@ -228,7 +228,7 @@ func Test_ListTagKeys(t *testing.T) { // Test_ListFieldKeys verifies the listing of field keys for different InfluxDB datasource linked types. func Test_ListFieldKeys(t *testing.T) { - t.Run("require bucket for FluxQL Datasource", func(t *testing.T) { + t.Run("require bucket for Flux Datasource", func(t *testing.T) { ctx := newTestContext() _, err := listFieldKeys(ctx, ListFieldKeysArgs{ DatasourceUID: "influxdb-flux", From 60ce70857ab1ff36bebc074d31cce143787311dd Mon Sep 17 00:00:00 2001 From: Shaik-Sirajuddin Date: Wed, 25 Mar 2026 10:26:33 +0530 Subject: [PATCH 20/24] chore(tools): replace manual slices cap increase operation with slices.grow --- tools/influxdb.go | 24 +++++++++++++----------- tools/influxdb_unit_test.go | 32 ++++++++------------------------ 2 files changed, 21 insertions(+), 35 deletions(-) diff --git a/tools/influxdb.go b/tools/influxdb.go index 3b2b40db..5457de35 100644 --- a/tools/influxdb.go +++ b/tools/influxdb.go @@ -316,9 +316,8 @@ func parseQueryResponseFrames(resp *grafana.DSQueryResponse) ([]*InfluxQueryResF return nil, fmt.Errorf("query error (refId=%s): %s", refID, r.Error) } - clonedFrames := make([]*InfluxQueryResFrame, len(frames), len(frames)+len(r.Frames)) - copy(clonedFrames, frames) - frames = 
clonedFrames + // grow slice to accomadte atleast len(r.Frames) elements + frames = slices.Grow(frames, len(r.Frames)) for _, frame := range r.Frames { @@ -335,6 +334,11 @@ func parseQueryResponseFrames(resp *grafana.DSQueryResponse) ([]*InfluxQueryResF continue } + if len(frame.Data.Values) != noOfCol { + // return error when data values count mismatch schema fields + return nil, fmt.Errorf("frame data values count (%d) mismatch schema fields count (%d)", len(frame.Data.Values), noOfCol) + } + // Number of rows count derived from count of values of first column rowCount := (len(frame.Data.Values[0])) resFrame.RowCount = uint(rowCount) @@ -474,15 +478,13 @@ func extractColValues(resp *grafana.DSQueryResponse, colName string) (*[]string, continue } - resizedFieldValues := make([]string, len(fieldValues), len(fieldValues)+len(frame.Data.Values[fieldColIdx])) - copy(resizedFieldValues, fieldValues) - fieldValues = resizedFieldValues + fieldValues = slices.Grow(fieldValues, len(frame.Data.Values[fieldColIdx])) for _, name := range frame.Data.Values[fieldColIdx] { if s, ok := name.(string); ok { fieldValues = append(fieldValues, s) } else { - return nil, errors.New("expected column to be string type") + return nil, fmt.Errorf("expected column %s to be string type, got %T", colName, name) } } } @@ -539,7 +541,7 @@ func listBuckets(ctx context.Context, args ListBucketArgs) (*ListBucketResult, e var ListBucketsInflux = mcpgrafana.MustTool( "list_influxdb_buckets", - "Lists buckets of an InfluxDB datasource identified by its UID. Requires the datasource to be configured with Flux. Use in order: list_datasources -> get_datasource -> list_buckets_influxdb", + "Lists buckets of an InfluxDB datasource identified by its UID. Requires the datasource to be configured with Flux. 
Use in order: list_datasources -> get_datasource -> list_influxdb_buckets", listBuckets, mcp.WithTitleAnnotation("List Buckets InfluxDB"), mcp.WithIdempotentHintAnnotation(true), @@ -629,7 +631,7 @@ func listMeasurements(ctx context.Context, args ListMeasurementsArgs) (*ListMeas var ListMeasurements = mcpgrafana.MustTool( "list_measurements_influxdb", - "Lists Measurements of an InfluxDB datasource identified by its UID. Use in order: list_datasources -> get_datasource -> list_buckets_influxdb (required only for Flux linked datasource) -> list_measurements_influxdb", + "Lists Measurements of an InfluxDB datasource identified by its UID. Use in order: list_datasources -> get_datasource -> list_influxdb_buckets (required only for Flux linked datasource) -> list_measurements_influxdb", listMeasurements, mcp.WithTitleAnnotation("List Measurements InfluxDB"), mcp.WithIdempotentHintAnnotation(true), @@ -742,7 +744,7 @@ func listTagKeys(ctx context.Context, args ListTagKeysArgs) (*ListTagKeysResult, var ListTagKeys = mcpgrafana.MustTool( "list_tag_keys_influxdb", - "Lists Tag Keys of an InfluxDB datasource identified by its UID. Use in order: list_datasources -> get_datasource -> list_buckets_influxdb (required only for Flux linked datasource) -> list_measurements_influxdb -> list_tag_keys_influxdb", + "Lists Tag Keys of an InfluxDB datasource identified by its UID. Use in order: list_datasources -> get_datasource -> list_influxdb_buckets (required only for Flux linked datasource) -> list_measurements_influxdb -> list_tag_keys_influxdb", listTagKeys, mcp.WithTitleAnnotation("List Tag Keys InfluxDB"), mcp.WithIdempotentHintAnnotation(true), @@ -840,7 +842,7 @@ func listFieldKeys(ctx context.Context, args ListFieldKeysArgs) (*ListFieldKeysR var ListFieldKeys = mcpgrafana.MustTool( "list_field_keys_influxdb", - "Lists Field Keys of an InfluxDB datasource identified by its UID. 
Use in order: list_datasources -> get_datasource -> list_buckets_influxdb (required only for Flux linked datasource) -> list_measurements_influxdb -> list_field_keys_influxdb", + "Lists Field Keys of an InfluxDB datasource identified by its UID. Use in order: list_datasources -> get_datasource -> list_influxdb_buckets (required only for Flux linked datasource) -> list_measurements_influxdb -> list_field_keys_influxdb", listFieldKeys, mcp.WithTitleAnnotation("List Field Keys InfluxDB"), mcp.WithIdempotentHintAnnotation(true), diff --git a/tools/influxdb_unit_test.go b/tools/influxdb_unit_test.go index 226ee6c8..2c12135b 100644 --- a/tools/influxdb_unit_test.go +++ b/tools/influxdb_unit_test.go @@ -1,3 +1,5 @@ +//go:build unit + package tools import ( @@ -158,18 +160,12 @@ func Test_extractColValues(t *testing.T) { "A": { Frames: []grafana.DsQueryFrame{ { - Schema: struct { - Name string `json:"name,omitempty"` - RefID string `json:"refId,omitempty"` - Fields []grafana.DsQueryFrameField `json:"fields"` - }{ + Schema: grafana.DsQueryFrameSchema{ Fields: []grafana.DsQueryFrameField{ {Name: "my_col"}, }, }, - Data: struct { - Values [][]interface{} `json:"values"` - }{ + Data: grafana.DSQueryFrameData{ Values: [][]interface{}{ {"val1", "val2"}, }, @@ -215,20 +211,14 @@ func Test_parseQueryResponseFrames(t *testing.T) { "A": { Frames: []grafana.DsQueryFrame{ { - Schema: struct { - Name string `json:"name,omitempty"` - RefID string `json:"refId,omitempty"` - Fields []grafana.DsQueryFrameField `json:"fields"` - }{ + Schema: grafana.DsQueryFrameSchema{ Name: "test_frame", Fields: []grafana.DsQueryFrameField{ field1, field2, }, }, - Data: struct { - Values [][]interface{} `json:"values"` - }{ + Data: grafana.DSQueryFrameData{ Values: [][]interface{}{ {1000, 2000}, {22.5, 23.0}, @@ -268,19 +258,13 @@ func Test_parseQueryResponseFrames(t *testing.T) { "A": { Frames: []grafana.DsQueryFrame{ { - Schema: struct { - Name string `json:"name,omitempty"` - RefID string 
`json:"refId,omitempty"` - Fields []grafana.DsQueryFrameField `json:"fields"` - }{ + Schema: grafana.DsQueryFrameSchema{ Name: "test_frame", Fields: []grafana.DsQueryFrameField{ {Name: "time"}, }, }, - Data: struct { - Values [][]interface{} `json:"values"` - }{ + Data: grafana.DSQueryFrameData{ Values: [][]interface{}{}, }, }, From 93bd6d7694d8df3703d431f54804db8ecda8493f Mon Sep 17 00:00:00 2001 From: Shaik-Sirajuddin Date: Wed, 25 Mar 2026 20:21:44 +0530 Subject: [PATCH 21/24] fix(tools): fix failing integration tests --- tools/influxdb.go | 2 +- tools/influxdb_integration_test.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/influxdb.go b/tools/influxdb.go index 5457de35..9b908ea6 100644 --- a/tools/influxdb.go +++ b/tools/influxdb.go @@ -577,7 +577,7 @@ func listMeasurements(ctx context.Context, args ListMeasurementsArgs) (*ListMeas enforceMeasurementsLimit(&args) if queryType == FluxQueryType && args.Bucket == "" { - return nil, fmt.Errorf("bucket is required for %s linked InfluxDB Datasources", FluxQueryType) + return nil, fmt.Errorf("bucket is required for %s linked InfluxDB datasources", FluxQueryType) } var query string // represents column key of measurement in response diff --git a/tools/influxdb_integration_test.go b/tools/influxdb_integration_test.go index 6cad70e2..cf97265a 100644 --- a/tools/influxdb_integration_test.go +++ b/tools/influxdb_integration_test.go @@ -131,7 +131,7 @@ func Test_ListMeasurements(t *testing.T) { _, err := listMeasurements(ctx, ListMeasurementsArgs{ DatasourceUID: "influxdb-flux", }) - require.EqualError(t, err, fmt.Sprintf("bucket is required for %s linked InfluxDB Datasources", FluxQueryType)) + require.EqualError(t, err, fmt.Sprintf("bucket is required for %s linked InfluxDB datasources", FluxQueryType)) }) t.Run("bucket optional for SQL/InfluxQL Datasource", func(t *testing.T) { @@ -174,7 +174,7 @@ func Test_ListTagKeys(t *testing.T) { DatasourceUID: "influxdb-flux", Measurement: 
"auth_events", }) - require.EqualError(t, err, fmt.Sprintf("bucket is required for %s linked InfluxDB Datasources", FluxQueryType)) + require.EqualError(t, err, fmt.Sprintf("bucket is required for %s linked InfluxDB datasources", FluxQueryType)) }) t.Run("list tags keys", func(t *testing.T) { @@ -234,7 +234,7 @@ func Test_ListFieldKeys(t *testing.T) { DatasourceUID: "influxdb-flux", Measurement: "auth_events", }) - require.EqualError(t, err, fmt.Sprintf("bucket is required for %s linked InfluxDB Datasources", FluxQueryType)) + require.EqualError(t, err, fmt.Sprintf("bucket is required for %s linked InfluxDB datasources", FluxQueryType)) }) t.Run("list field keys", func(t *testing.T) { From c40039550c9a348ce11bbebc7018a9527236da1c Mon Sep 17 00:00:00 2001 From: Shaik-Sirajuddin Date: Mon, 30 Mar 2026 12:13:45 +0530 Subject: [PATCH 22/24] chore(tools): enhance query limit application to support cte influx --- cmd/mcp-grafana/main.go | 2 +- docker-compose.yaml | 2 +- pkg/grafana/datasource.go | 17 +- .../influxdb/.env.influxdb2-admin-password | 2 +- .../tools/influxdb/.env.influxdb2-admin-token | 2 +- .../influxdb/.env.influxdb2-admin-username | 2 +- testdata/tools/influxdb/influxdbv2-seed.sh | 2 +- testdata/tools/influxdb/influxdbv3-seed.sh | 2 +- testdata/tools/influxdb/user | 0 tools/cloudwatch_test.go | 14 +- tools/influxdb.go | 175 ++++++++++++---- tools/influxdb_unit_test.go | 190 ++++++++++++++++-- tools/prom_backend_cloudmonitoring.go | 6 +- tools/prom_backend_cloudmonitoring_test.go | 54 ++--- 14 files changed, 359 insertions(+), 111 deletions(-) delete mode 100644 testdata/tools/influxdb/user diff --git a/cmd/mcp-grafana/main.go b/cmd/mcp-grafana/main.go index 4fae08dd..13036cbc 100644 --- a/cmd/mcp-grafana/main.go +++ b/cmd/mcp-grafana/main.go @@ -185,7 +185,7 @@ Available Capabilities: - Prometheus & Loki: Run PromQL and LogQL queries, retrieve metric/log metadata, and explore label names/values. 
- ClickHouse: Query ClickHouse datasources via Grafana with macro and variable substitution support. - Elasticsearch: Query Elasticsearch datasources using Lucene syntax or Query DSL for logs and metrics. -- InfluxDB : Query InfluxDB datasourcs with SQL , InfluxQL , Flux languages +- InfluxDB: Query InfluxDB datasources with SQL, InfluxQL, Flux languages - Incidents: Search, create, update, and resolve incidents in Grafana Incident. - Sift Investigations: Start and manage Sift investigations, analyze logs/traces, find error patterns, and detect slow requests. - Alerting: List and fetch alert rules and notification contact points. diff --git a/docker-compose.yaml b/docker-compose.yaml index ae7ee79d..38063c6b 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -173,4 +173,4 @@ secrets: file: ./testdata/tools/influxdb/.env.influxdb2-admin-token admin-token: file: ./testdata/tools/influxdb/admin-token.json - \ No newline at end of file + diff --git a/pkg/grafana/datasource.go b/pkg/grafana/datasource.go index 0f721ac1..187c71df 100644 --- a/pkg/grafana/datasource.go +++ b/pkg/grafana/datasource.go @@ -1,4 +1,3 @@ -// package grafana package grafana import "errors" @@ -11,7 +10,7 @@ type DSQueryPayload struct { To string `json:"to"` } -type DsQueryFrameField struct { +type DSQueryFrameField struct { Name string `json:"name"` Type string `json:"type"` TypeInfo struct { @@ -21,28 +20,28 @@ type DsQueryFrameField struct { Config map[string]interface{} `json:"config,omitempty"` } -type DsQueryFrameSchema struct { +type DSQueryFrameSchema struct { Name string `json:"name,omitempty"` RefID string `json:"refId,omitempty"` - Fields []DsQueryFrameField `json:"fields"` + Fields []DSQueryFrameField `json:"fields"` } type DSQueryFrameData struct { Values [][]interface{} `json:"values"` } -type DsQueryFrame struct { - Schema DsQueryFrameSchema `json:"schema,omitempty"` +type DSQueryFrame struct { + Schema DSQueryFrameSchema `json:"schema,omitempty"` Data DSQueryFrameData 
`json:"data"` } -type DsQueryResult struct { +type DSQueryResult struct { Status int `json:"status,omitempty"` - Frames []DsQueryFrame `json:"frames,omitempty"` + Frames []DSQueryFrame `json:"frames,omitempty"` Error string `json:"error,omitempty"` } // DSQueryResponse represents the raw API response from Grafana's /api/ds/query type DSQueryResponse struct { - Results map[string]DsQueryResult `json:"results"` + Results map[string]DSQueryResult `json:"results"` } diff --git a/testdata/tools/influxdb/.env.influxdb2-admin-password b/testdata/tools/influxdb/.env.influxdb2-admin-password index 7aa311ad..f3097ab1 100644 --- a/testdata/tools/influxdb/.env.influxdb2-admin-password +++ b/testdata/tools/influxdb/.env.influxdb2-admin-password @@ -1 +1 @@ -password \ No newline at end of file +password diff --git a/testdata/tools/influxdb/.env.influxdb2-admin-token b/testdata/tools/influxdb/.env.influxdb2-admin-token index 029d54ef..fae5b667 100644 --- a/testdata/tools/influxdb/.env.influxdb2-admin-token +++ b/testdata/tools/influxdb/.env.influxdb2-admin-token @@ -1 +1 @@ -admintoken \ No newline at end of file +admintoken diff --git a/testdata/tools/influxdb/.env.influxdb2-admin-username b/testdata/tools/influxdb/.env.influxdb2-admin-username index f77b0040..7fbe952b 100644 --- a/testdata/tools/influxdb/.env.influxdb2-admin-username +++ b/testdata/tools/influxdb/.env.influxdb2-admin-username @@ -1 +1 @@ -admin \ No newline at end of file +admin diff --git a/testdata/tools/influxdb/influxdbv2-seed.sh b/testdata/tools/influxdb/influxdbv2-seed.sh index 478f59b0..325dbe2f 100755 --- a/testdata/tools/influxdb/influxdbv2-seed.sh +++ b/testdata/tools/influxdb/influxdbv2-seed.sh @@ -80,4 +80,4 @@ echo "" echo "✅ Seeding complete." 
echo " Host: $INFLUX_HOST" echo " Org: $ORG_NAME" -echo " Buckets: $BUCKET_NAME" \ No newline at end of file +echo " Buckets: $BUCKET_NAME" diff --git a/testdata/tools/influxdb/influxdbv3-seed.sh b/testdata/tools/influxdb/influxdbv3-seed.sh index 52cf353a..16a9843f 100644 --- a/testdata/tools/influxdb/influxdbv3-seed.sh +++ b/testdata/tools/influxdb/influxdbv3-seed.sh @@ -66,4 +66,4 @@ queue_stats,app=worker,queue=sms pending=10,processed=980,failed=0,dlq_size=0 $M queue_stats,app=worker,queue=notifications pending=500,processed=4000,failed=12,dlq_size=12 $M4 EOF -echo "✅ Done! Seeded all data into: $DB_NAME" \ No newline at end of file +echo "✅ Done! Seeded all data into: $DB_NAME" diff --git a/testdata/tools/influxdb/user b/testdata/tools/influxdb/user deleted file mode 100644 index e69de29b..00000000 diff --git a/tools/cloudwatch_test.go b/tools/cloudwatch_test.go index 6d8f4203..f83a883c 100644 --- a/tools/cloudwatch_test.go +++ b/tools/cloudwatch_test.go @@ -253,12 +253,12 @@ func TestCloudWatchMultiFrameStatistics(t *testing.T) { // Build a cloudWatchQueryResponse with 2 frames to verify statistics // are accumulated across all frames, not just the last one. 
resp := &grafana.DSQueryResponse{ - Results: map[string]grafana.DsQueryResult{}, + Results: map[string]grafana.DSQueryResult{}, } // Frame 1: values 10, 20 (sum=30, min=10, max=20) - f1 := grafana.DsQueryFrame{} - f1.Schema.Fields = []grafana.DsQueryFrameField{ + f1 := grafana.DSQueryFrame{} + f1.Schema.Fields = []grafana.DSQueryFrameField{ {Name: "Time", Type: "time"}, {Name: "Value", Type: "number"}, } @@ -268,8 +268,8 @@ func TestCloudWatchMultiFrameStatistics(t *testing.T) { } // Frame 2: values 5, 40 (sum=45, min=5, max=40) - f2 := grafana.DsQueryFrame{} - f2.Schema.Fields = []grafana.DsQueryFrameField{ + f2 := grafana.DSQueryFrame{} + f2.Schema.Fields = []grafana.DSQueryFrameField{ {Name: "Time", Type: "time"}, {Name: "Value", Type: "number"}, } @@ -278,8 +278,8 @@ func TestCloudWatchMultiFrameStatistics(t *testing.T) { {float64(5.0), float64(40.0)}, // values } - resp.Results["A"] = grafana.DsQueryResult{ - Frames: []grafana.DsQueryFrame{f1, f2}, + resp.Results["A"] = grafana.DSQueryResult{ + Frames: []grafana.DSQueryFrame{f1, f2}, } // Process the response the same way queryCloudWatch does diff --git a/tools/influxdb.go b/tools/influxdb.go index 9b908ea6..b549489b 100644 --- a/tools/influxdb.go +++ b/tools/influxdb.go @@ -36,6 +36,13 @@ const ( InfluxDBTagsMaxLimit uint = 1000 ) +const ( + // influxDBResponseLimitBytes is the max limit for successful query responses (10MB) + influxDBResponseLimitBytes = 1024 * 1024 * 10 + // influxDBErrorResponseLimitBytes is the max limit for error responses (1MB) + influxDBErrorResponseLimitBytes = 1024 * 1024 +) + // Supported query types for the InfluxDB client. const ( FluxQueryType = "Flux" @@ -43,9 +50,23 @@ const ( InfluxQLQueryType = "InfluxQL" ) -// InfluxQL query supports limits in the format -// LIMIT %d | LIMIT %d OFFSET %d, Unsupported: LIMIT %d,%d -var limitRegEx = regexp.MustCompile(`(?i)(limit\s+)\d+(\s+offset\s+\d+)?(\s*$)`) +// Regex expressions used for query parsing and limit enforcement. 
+var ( + // influxQLLimitRegEx matches InfluxQL LIMIT and optional OFFSET clauses. + influxQLLimitRegEx = regexp.MustCompile(`(?i)(limit\s+)\d+(\s+offset\s+\d+)?(\s*$)`) + + // sqlCTEStartRegEx matches the start of a CTE (WITH clause). + sqlCTEStartRegEx = regexp.MustCompile(`(?i)^\s*WITH\b`) + + // sqlKeywordRegEx matches standard SQL keywords that follow a CTE. + sqlKeywordRegEx = regexp.MustCompile(`(?i)^(SELECT|INSERT|UPDATE|DELETE|MERGE|TRUNCATE)\b`) + + // sqlLimitRegEx matches a SQL LIMIT clause. + sqlLimitRegEx = regexp.MustCompile(`(?i)\bLIMIT\s+\d+`) + + // fluxLimitRegEx matches a Flux limit operator at the end of a query. + fluxLimitRegEx = regexp.MustCompile(`(?i)\|>\s*limit\s*\(\s*n\s*:\s*\d+\s*\)\s*$`) +) type influxDBClient struct { httpClient *http.Client @@ -133,23 +154,20 @@ type InfluxQueryResult struct { Hints *EmptyResultHints `json:"hints,omitempty"` } -type InfluxQLQuery struct { - Datasource DatasourceRef `json:"datasource"` - RefID string `json:"refId"` - Type string `json:"type"` - Format string `json:"format"` - IntervalMs uint `json:"intervalMs"` - Query string `json:"query"` - RawSQL string `json:"rawSql"` - RawQuery bool `json:"rawQuery"` - Limit string `json:"limit"` - ResultFormat string `json:"resultFormat"` -} - -// DatasourceRef encapsulates the unique identifier and type of a Grafana data source. 
-type DatasourceRef struct { - UID string `json:"uid"` - Type string `json:"type"` +type influxDBQueryPayload struct { + Datasource struct { + UID string `json:"uid"` + Type string `json:"type"` + } `json:"datasource"` + RefID string `json:"refId"` + Type string `json:"type"` + Format string `json:"format"` + IntervalMs uint `json:"intervalMs"` + Query string `json:"query"` + RawSQL string `json:"rawSql"` + RawQuery bool `json:"rawQuery"` + Limit string `json:"limit"` + ResultFormat string `json:"resultFormat"` } func (ic *influxDBClient) Query(ctx context.Context, args InfluxQueryArgs, from, to time.Time) (*grafana.DSQueryResponse, error) { @@ -159,8 +177,11 @@ func (ic *influxDBClient) Query(ctx context.Context, args InfluxQueryArgs, from, format = "table" } - query := InfluxQLQuery{ - Datasource: DatasourceRef{ + query := influxDBQueryPayload{ + Datasource: struct { + UID string `json:"uid"` + Type string `json:"type"` + }{ UID: args.DatasourceUID, Type: InfluxDBDataSourceType, }, @@ -207,24 +228,74 @@ func (ic *influxDBClient) Query(ctx context.Context, args InfluxQueryArgs, from, defer func() { _ = resp.Body.Close() }() if resp.StatusCode != http.StatusOK { - bodyBytes, _ := io.ReadAll(resp.Body) + bodyBytes, _ := io.ReadAll(io.LimitReader(resp.Body, influxDBErrorResponseLimitBytes)) return nil, fmt.Errorf("InfluxDB query returned status %d: %s", resp.StatusCode, string(bodyBytes)) } // Read and parse response - body := io.LimitReader(resp.Body, 1024*1024*10) // 10MB limit - bodyBytes, err := io.ReadAll(body) + var queryResp grafana.DSQueryResponse + bodyBytes, err := io.ReadAll(io.LimitReader(resp.Body, influxDBResponseLimitBytes)) if err != nil { return nil, fmt.Errorf("reading response body: %w", err) } - var queryResp grafana.DSQueryResponse - if err := json.Unmarshal(bodyBytes, &queryResp); err != nil { - return nil, fmt.Errorf("unmarshaling response: %w", err) + + if err := unmarshalJSONWithLimitMsg(bodyBytes, &queryResp, influxDBResponseLimitBytes); err 
!= nil { + return nil, err } return &queryResp, nil } +func findTopLevelSelectAfterCTE(query string) int { + loc := sqlCTEStartRegEx.FindStringIndex(query) + if loc == nil { + return -1 + } + i := loc[1] + + for i < len(query) { + parenIdx := strings.Index(query[i:], "(") + if parenIdx == -1 { + break + } + i += parenIdx + + depth := 0 + for i < len(query) { + switch query[i] { + case '(': + depth++ + case ')': + depth-- + } + i++ + if depth == 0 { + break + } + } + + // Skip whitespace + for i < len(query) && (query[i] == ' ' || query[i] == '\n' || query[i] == '\t' || query[i] == '\r') { + i++ + } + + if i >= len(query) { + return -1 // nothing after closing paren — malformed + } + + if query[i] == ',' { + i++ // another CTE follows + } else { + // Verify a valid SQL keyword exists here (SELECT, INSERT, UPDATE, DELETE, etc.) + if !sqlKeywordRegEx.MatchString(query[i:]) { + return -1 + } + return i + } + } + return -1 +} + func enforceQueryLimit(args *InfluxQueryArgs) { // flux, influxql limits per measurement(influxql), table(flux) level so number of measurements * limit is final records // sql limit applies on final records level @@ -239,24 +310,46 @@ func enforceQueryLimit(args *InfluxQueryArgs) { switch args.QueryType { case SQLQueryType: - // wrap query and apply limit query := strings.TrimSuffix(args.Query, ";") - args.Query = "(" + query + ")" + fmt.Sprintf(" LIMIT %d", limit) + + if sqlCTEStartRegEx.MatchString(query) { + // CTE query + pos := findTopLevelSelectAfterCTE(query) + if pos != -1 { + ctePrefix := query[:pos] // WITH a AS (...), b AS (...) 
+ selectPart := query[pos:] // SELECT * FROM a JOIN b ON true + + if !sqlLimitRegEx.MatchString(selectPart) { + wrappedSelect := "(" + selectPart + ")" + fmt.Sprintf(" LIMIT %d", limit) + args.Query = ctePrefix + wrappedSelect + } + } + } else { + // window functions , generic queries + // wrap query and apply limit + args.Query = "(" + query + ")" + fmt.Sprintf(" LIMIT %d", limit) + } case InfluxQLQueryType: // override limits when query contains limit - if limitRegEx.Match([]byte(args.Query)) { + if influxQLLimitRegEx.Match([]byte(args.Query)) { replacement := fmt.Sprintf("${1}%d${2}${3}", limit) - args.Query = limitRegEx.ReplaceAllString(args.Query, replacement) + args.Query = influxQLLimitRegEx.ReplaceAllString(args.Query, replacement) } else { // append limit in other cases query := strings.TrimSuffix(args.Query, ";") args.Query = query + fmt.Sprintf(" LIMIT %d", limit) } - case FluxQueryType: - // A query can execute selection of multiple tables - // flux |>limit() operator applies limit per table or group - args.Query = strings.TrimSpace(args.Query) + fmt.Sprintf("\n|>limit(n:%d)", limit) + query := strings.TrimSpace(args.Query) + + if fluxLimitRegEx.MatchString(query) { + // Replace existing limit at end + args.Query = fluxLimitRegEx.ReplaceAllString(query, fmt.Sprintf("|> limit(n:%d)", limit)) + } else { + // Always append limit at end — goal is to always have limit as final operator + args.Query = query + fmt.Sprintf("\n|> limit(n:%d)", limit) + } + } } @@ -671,9 +764,9 @@ func quoteStringAsFluxLiteral(s string) string { return `"` + s + `"` } -func quoteStringAsInfluxQLLiteral(s string) string { - // InfluxQL identical as Flux - return quoteStringAsFluxLiteral(s) +// quoteStringAsInfluxQLIdentifier quotes a string as an InfluxQL identifier using double quotes. 
+func quoteStringAsInfluxQLIdentifier(s string) string { + return `"` + strings.ReplaceAll(s, `"`, `\"`) + `"` } func listTagKeys(ctx context.Context, args ListTagKeysArgs) (*ListTagKeysResult, error) { @@ -706,7 +799,7 @@ func listTagKeys(ctx context.Context, args ListTagKeysArgs) (*ListTagKeysResult, tagColumnKey = "_value" case InfluxQLQueryType: query = fmt.Sprintf(`SHOW TAG KEYS FROM %s LIMIT %d`, - quoteStringAsInfluxQLLiteral(args.Measurement), args.Limit) + quoteStringAsInfluxQLIdentifier(args.Measurement), args.Limit) tagColumnKey = "Value" } @@ -804,7 +897,7 @@ func listFieldKeys(ctx context.Context, args ListFieldKeysArgs) (*ListFieldKeysR fieldColumnKey = "_value" case InfluxQLQueryType: query = fmt.Sprintf(`SHOW FIELD KEYS FROM %s LIMIT %d`, - quoteStringAsInfluxQLLiteral(args.Measurement), args.Limit) + quoteStringAsInfluxQLIdentifier(args.Measurement), args.Limit) fieldColumnKey = "Value" } diff --git a/tools/influxdb_unit_test.go b/tools/influxdb_unit_test.go index 2c12135b..b312c08c 100644 --- a/tools/influxdb_unit_test.go +++ b/tools/influxdb_unit_test.go @@ -4,6 +4,7 @@ package tools import ( "errors" + "strings" "testing" "time" @@ -99,10 +100,31 @@ func Test_enforceQueryLimit(t *testing.T) { t.Run("should apply flux limit", func(t *testing.T) { args := InfluxQueryArgs{QueryType: FluxQueryType, Query: "from(bucket: \"my-bucket\")", Limit: 20} enforceQueryLimit(&args) - assert.Equal(t, "from(bucket: \"my-bucket\")\n|>limit(n:20)", args.Query, "flux query should be limited") + assert.Equal(t, "from(bucket: \"my-bucket\")\n|> limit(n:20)", args.Query, "flux query should be limited") t.Log("applied flux limit") }) + t.Run("should replace flux limit at absolute end", func(t *testing.T) { + args := InfluxQueryArgs{QueryType: FluxQueryType, Query: "from(bucket: \"my-bucket\") |> limit(n:10)", Limit: 20} + enforceQueryLimit(&args) + assert.Equal(t, "from(bucket: \"my-bucket\") |> limit(n:20)", args.Query, "flux query should have limit replaced") + 
t.Log("replaced absolute end flux limit") + }) + + t.Run("should append flux limit when existing limit is not at end", func(t *testing.T) { + args := InfluxQueryArgs{QueryType: FluxQueryType, Query: "from(bucket: \"my-bucket\") |> limit(n:10) |> count()", Limit: 20} + enforceQueryLimit(&args) + assert.Equal(t, "from(bucket: \"my-bucket\") |> limit(n:10) |> count()\n|> limit(n:20)", args.Query, "flux query should have another limit appended at end") + t.Log("appended flux limit after transformation") + }) + + t.Run("should handle whitespace and case-insensitivity in flux limit replacement", func(t *testing.T) { + args := InfluxQueryArgs{QueryType: FluxQueryType, Query: "from(bucket: \"my-bucket\") |> LIMIT ( n : 5 ) ", Limit: 20} + enforceQueryLimit(&args) + assert.Equal(t, "from(bucket: \"my-bucket\") |> limit(n:20)", args.Query, "flux query should normalize and replace limit") + t.Log("normalized and replaced flux limit") + }) + t.Run("should replace influxql limit if exists", func(t *testing.T) { args := InfluxQueryArgs{QueryType: InfluxQLQueryType, Query: "SELECT * FROM my_table LIMIT 100", Limit: 50} enforceQueryLimit(&args) @@ -156,12 +178,12 @@ func Test_extractColValues(t *testing.T) { t.Run("test_response", func(t *testing.T) { t.Run("should extract values from valid response", func(t *testing.T) { resp := &grafana.DSQueryResponse{ - Results: map[string]grafana.DsQueryResult{ + Results: map[string]grafana.DSQueryResult{ "A": { - Frames: []grafana.DsQueryFrame{ + Frames: []grafana.DSQueryFrame{ { - Schema: grafana.DsQueryFrameSchema{ - Fields: []grafana.DsQueryFrameField{ + Schema: grafana.DSQueryFrameSchema{ + Fields: []grafana.DSQueryFrameField{ {Name: "my_col"}, }, }, @@ -184,7 +206,7 @@ func Test_extractColValues(t *testing.T) { t.Run("should propagate error from result", func(t *testing.T) { resp := &grafana.DSQueryResponse{ - Results: map[string]grafana.DsQueryResult{ + Results: map[string]grafana.DSQueryResult{ "A": { Error: "some target error", }, @@ 
-202,18 +224,18 @@ func Test_extractColValues(t *testing.T) { func Test_parseQueryResponseFrames(t *testing.T) { t.Run("test_response", func(t *testing.T) { t.Run("should parse frames successfully", func(t *testing.T) { - field1 := grafana.DsQueryFrameField{Name: "time"} - field2 := grafana.DsQueryFrameField{Name: "_value", Labels: make(map[string]string)} + field1 := grafana.DSQueryFrameField{Name: "time"} + field2 := grafana.DSQueryFrameField{Name: "_value", Labels: make(map[string]string)} field2.Labels["_field"] = "temp" resp := &grafana.DSQueryResponse{ - Results: map[string]grafana.DsQueryResult{ + Results: map[string]grafana.DSQueryResult{ "A": { - Frames: []grafana.DsQueryFrame{ + Frames: []grafana.DSQueryFrame{ { - Schema: grafana.DsQueryFrameSchema{ + Schema: grafana.DSQueryFrameSchema{ Name: "test_frame", - Fields: []grafana.DsQueryFrameField{ + Fields: []grafana.DSQueryFrameField{ field1, field2, }, @@ -240,7 +262,7 @@ func Test_parseQueryResponseFrames(t *testing.T) { t.Run("should return error when results contain error", func(t *testing.T) { resp := &grafana.DSQueryResponse{ - Results: map[string]grafana.DsQueryResult{ + Results: map[string]grafana.DSQueryResult{ "A": { Error: "query failed", }, @@ -254,13 +276,13 @@ func Test_parseQueryResponseFrames(t *testing.T) { t.Run("should return error when no rows", func(t *testing.T) { resp := &grafana.DSQueryResponse{ - Results: map[string]grafana.DsQueryResult{ + Results: map[string]grafana.DSQueryResult{ "A": { - Frames: []grafana.DsQueryFrame{ + Frames: []grafana.DSQueryFrame{ { - Schema: grafana.DsQueryFrameSchema{ + Schema: grafana.DSQueryFrameSchema{ Name: "test_frame", - Fields: []grafana.DsQueryFrameField{ + Fields: []grafana.DSQueryFrameField{ {Name: "time"}, }, }, @@ -280,3 +302,137 @@ func Test_parseQueryResponseFrames(t *testing.T) { }) }) } + +func TestQuoting(t *testing.T) { + t.Run("quoteStringAsFluxLiteral", func(t *testing.T) { + assert.Equal(t, `"standard"`, 
quoteStringAsFluxLiteral("standard")) + assert.Equal(t, `"with \"quotes\""`, quoteStringAsFluxLiteral(`with "quotes"`)) + assert.Equal(t, `"with \\backslashes\\"`, quoteStringAsFluxLiteral(`with \backslashes\`)) + }) + + t.Run("quoteStringAsInfluxQLIdentifier", func(t *testing.T) { + assert.Equal(t, `"standard"`, quoteStringAsInfluxQLIdentifier("standard")) + assert.Equal(t, `"with \"quotes\""`, quoteStringAsInfluxQLIdentifier(`with "quotes"`)) + assert.Equal(t, `"with \backslashes"`, quoteStringAsInfluxQLIdentifier(`with \backslashes`)) // backslash escaping + }) + + t.Run("quoteStringAsLiteral (SQL style)", func(t *testing.T) { + assert.Equal(t, "'standard'", quoteStringAsLiteral("standard")) + assert.Equal(t, "'it''s a test'", quoteStringAsLiteral("it's a test")) + }) +} + +func TestFindTopLevelSelectAfterCTE(t *testing.T) { + tests := []struct { + name string + query string + wantPos int // -1 if not found, otherwise we just check query[pos:] starts with SELECT + wantSel string // expected string at pos (trimmed, lowercased prefix) + }{ + { + name: "single CTE", + query: `WITH a AS ( + SELECT * FROM orders + ) + SELECT * FROM a`, + wantSel: "SELECT * FROM a", + }, + { + name: "multiple CTEs", + query: `WITH a AS ( + SELECT * FROM orders + ), + b AS ( + SELECT COUNT(*) AS cnt FROM a + ) + SELECT * FROM a JOIN b ON true`, + wantSel: "SELECT * FROM a JOIN b ON true", + }, + { + name: "CTE with nested subquery inside", + query: `WITH a AS ( + SELECT * FROM (SELECT id FROM orders WHERE id IN (SELECT id FROM archive)) sub + ) + SELECT * FROM a`, + wantSel: "SELECT * FROM a", + }, + { + name: "CTE with window function in body", + query: `WITH ranked AS ( + SELECT *, ROW_NUMBER() OVER (PARTITION BY id ORDER BY created_at DESC) AS rn FROM orders + ) + SELECT * FROM ranked WHERE rn = 1`, + wantSel: "SELECT * FROM ranked WHERE rn = 1", + }, + { + name: "not a CTE query", + query: `SELECT * FROM orders`, + wantPos: -1, + }, + { + name: "empty string", + query: ``, + 
wantPos: -1, + }, + { + name: "CTE with no final select (malformed)", + query: `WITH a AS (SELECT * FROM orders)`, + wantPos: -1, + }, + { + name: "lowercase with", + query: `with a as ( + select * from orders + ) + select * from a`, + wantSel: "select * from a", + }, + { + name: "three CTEs", + query: `WITH a AS (SELECT * FROM t1), + b AS (SELECT * FROM t2), + c AS (SELECT * FROM a JOIN b ON a.id = b.id) + SELECT * FROM c`, + wantSel: "SELECT * FROM c", + }, + { + name: "CTE with deeply nested parens", + query: `WITH a AS ( + SELECT * FROM (SELECT * FROM (SELECT * FROM orders) t1) t2 + ) + SELECT * FROM a`, + wantSel: "SELECT * FROM a", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + pos := findTopLevelSelectAfterCTE(tt.query) + + if tt.wantPos == -1 { + if pos != -1 { + t.Log(tt) + t.Errorf("expected -1, got %d", pos) + } + return + } + + if pos == -1 { + t.Fatalf("expected a valid position, got -1") + } + + got := strings.TrimSpace(tt.query[pos:]) + if !strings.EqualFold(got[:6], "select") { + t.Errorf("expected SELECT at pos %d, got: %q", pos, got[:10]) + } + + if tt.wantSel != "" { + gotTrimmed := strings.TrimSpace(got) + wantTrimmed := strings.TrimSpace(tt.wantSel) + if !strings.EqualFold(gotTrimmed, wantTrimmed) { + t.Errorf("select part mismatch:\n got: %q\n want: %q", gotTrimmed, wantTrimmed) + } + } + }) + } +} diff --git a/tools/prom_backend_cloudmonitoring.go b/tools/prom_backend_cloudmonitoring.go index 93fa88c3..b9d758d8 100644 --- a/tools/prom_backend_cloudmonitoring.go +++ b/tools/prom_backend_cloudmonitoring.go @@ -382,7 +382,7 @@ func framesToPrometheusValue(resp *grafana.DSQueryResponse, queryType string) (m return framesToMatrix(r.Frames) } -func framesToMatrix(frames []grafana.DsQueryFrame) (model.Matrix, error) { +func framesToMatrix(frames []grafana.DSQueryFrame) (model.Matrix, error) { var matrix model.Matrix for _, frame := range frames { timeIdx, valueIdx := findTimeAndValueFields(frame.Schema.Fields) @@ 
-425,7 +425,7 @@ func framesToMatrix(frames []grafana.DsQueryFrame) (model.Matrix, error) { return matrix, nil } -func framesToVector(frames []grafana.DsQueryFrame) (model.Vector, error) { +func framesToVector(frames []grafana.DSQueryFrame) (model.Vector, error) { var vector model.Vector for _, frame := range frames { timeIdx, valueIdx := findTimeAndValueFields(frame.Schema.Fields) @@ -465,7 +465,7 @@ func framesToVector(frames []grafana.DsQueryFrame) (model.Vector, error) { return vector, nil } -func findTimeAndValueFields(fields []grafana.DsQueryFrameField) (timeIdx, valueIdx int) { +func findTimeAndValueFields(fields []grafana.DSQueryFrameField) (timeIdx, valueIdx int) { timeIdx = -1 valueIdx = -1 for i, f := range fields { diff --git a/tools/prom_backend_cloudmonitoring_test.go b/tools/prom_backend_cloudmonitoring_test.go index 431aaab8..b9eb7a2b 100644 --- a/tools/prom_backend_cloudmonitoring_test.go +++ b/tools/prom_backend_cloudmonitoring_test.go @@ -15,11 +15,11 @@ import ( func TestFramesToMatrix(t *testing.T) { t.Run("single series", func(t *testing.T) { - frames := []grafana.DsQueryFrame{ + frames := []grafana.DSQueryFrame{ { - Schema: grafana.DsQueryFrameSchema{ + Schema: grafana.DSQueryFrameSchema{ Name: "cpu_usage", - Fields: []grafana.DsQueryFrameField{ + Fields: []grafana.DSQueryFrameField{ {Name: "Time", Type: "time"}, {Name: "Value", Type: "number", Labels: map[string]string{"host": "a"}}, }, @@ -44,18 +44,18 @@ func TestFramesToMatrix(t *testing.T) { }) t.Run("multiple series", func(t *testing.T) { - frames := []grafana.DsQueryFrame{ + frames := []grafana.DSQueryFrame{ { - Schema: grafana.DsQueryFrameSchema{ + Schema: grafana.DSQueryFrameSchema{ Name: "cpu", - Fields: []grafana.DsQueryFrameField{{Name: "Time", Type: "time"}, {Name: "Value", Type: "number", Labels: map[string]string{"host": "a"}}}, + Fields: []grafana.DSQueryFrameField{{Name: "Time", Type: "time"}, {Name: "Value", Type: "number", Labels: map[string]string{"host": "a"}}}, }, Data: 
grafana.DSQueryFrameData{Values: [][]interface{}{{float64(1000)}, {float64(0.5)}}}, }, { - Schema: grafana.DsQueryFrameSchema{ + Schema: grafana.DSQueryFrameSchema{ Name: "cpu", - Fields: []grafana.DsQueryFrameField{{Name: "Time", Type: "time"}, {Name: "Value", Type: "number", Labels: map[string]string{"host": "b"}}}, + Fields: []grafana.DSQueryFrameField{{Name: "Time", Type: "time"}, {Name: "Value", Type: "number", Labels: map[string]string{"host": "b"}}}, }, Data: grafana.DSQueryFrameData{Values: [][]interface{}{{float64(1000)}, {float64(0.8)}}}, }, @@ -73,10 +73,10 @@ func TestFramesToMatrix(t *testing.T) { }) t.Run("frame missing time field", func(t *testing.T) { - frames := []grafana.DsQueryFrame{ + frames := []grafana.DSQueryFrame{ { - Schema: grafana.DsQueryFrameSchema{ - Fields: []grafana.DsQueryFrameField{{Name: "Value", Type: "number"}}, + Schema: grafana.DSQueryFrameSchema{ + Fields: []grafana.DSQueryFrameField{{Name: "Value", Type: "number"}}, }, Data: grafana.DSQueryFrameData{Values: [][]interface{}{{float64(1.0)}}}, }, @@ -90,11 +90,11 @@ func TestFramesToMatrix(t *testing.T) { func TestFramesToVector(t *testing.T) { t.Run("single sample", func(t *testing.T) { - frames := []grafana.DsQueryFrame{ + frames := []grafana.DSQueryFrame{ { - Schema: grafana.DsQueryFrameSchema{ + Schema: grafana.DSQueryFrameSchema{ Name: "up", - Fields: []grafana.DsQueryFrameField{ + Fields: []grafana.DSQueryFrameField{ {Name: "Time", Type: "time"}, {Name: "Value", Type: "number", Labels: map[string]string{"job": "prometheus"}}, }, @@ -118,10 +118,10 @@ func TestFramesToVector(t *testing.T) { }) t.Run("takes last value from multi-point frame", func(t *testing.T) { - frames := []grafana.DsQueryFrame{ + frames := []grafana.DSQueryFrame{ { - Schema: grafana.DsQueryFrameSchema{ - Fields: []grafana.DsQueryFrameField{ + Schema: grafana.DSQueryFrameSchema{ + Fields: []grafana.DSQueryFrameField{ {Name: "Time", Type: "time"}, {Name: "Value", Type: "number"}, }, @@ -151,7 +151,7 @@ 
func TestFramesToVector(t *testing.T) { func TestFramesToPrometheusValue(t *testing.T) { t.Run("missing refId returns empty", func(t *testing.T) { - resp := &grafana.DSQueryResponse{Results: map[string]grafana.DsQueryResult{}} + resp := &grafana.DSQueryResponse{Results: map[string]grafana.DSQueryResult{}} v, err := framesToPrometheusValue(resp, "range") require.NoError(t, err) assert.Equal(t, model.Matrix{}, v) @@ -162,7 +162,7 @@ func TestFramesToPrometheusValue(t *testing.T) { }) t.Run("error in result", func(t *testing.T) { - resp := &grafana.DSQueryResponse{Results: map[string]grafana.DsQueryResult{ + resp := &grafana.DSQueryResponse{Results: map[string]grafana.DSQueryResult{ "A": {Error: "something went wrong"}, }} _, err := framesToPrometheusValue(resp, "range") @@ -236,28 +236,28 @@ func TestMapGCPMetricKind(t *testing.T) { func TestExtractLabelValuesFromFrames(t *testing.T) { resp := &grafana.DSQueryResponse{ - Results: map[string]grafana.DsQueryResult{ + Results: map[string]grafana.DSQueryResult{ "A": { - Frames: []grafana.DsQueryFrame{ + Frames: []grafana.DSQueryFrame{ { - Schema: grafana.DsQueryFrameSchema{ - Fields: []grafana.DsQueryFrameField{ + Schema: grafana.DSQueryFrameSchema{ + Fields: []grafana.DSQueryFrameField{ {Name: "Time", Type: "time"}, {Name: "Value", Type: "number", Labels: map[string]string{"zone": "us-east1-b", "project_id": "my-project"}}, }, }, }, { - Schema: grafana.DsQueryFrameSchema{ - Fields: []grafana.DsQueryFrameField{ + Schema: grafana.DSQueryFrameSchema{ + Fields: []grafana.DSQueryFrameField{ {Name: "Time", Type: "time"}, {Name: "Value", Type: "number", Labels: map[string]string{"zone": "us-west1-a", "project_id": "my-project"}}, }, }, }, { - Schema: grafana.DsQueryFrameSchema{ - Fields: []grafana.DsQueryFrameField{ + Schema: grafana.DSQueryFrameSchema{ + Fields: []grafana.DSQueryFrameField{ {Name: "Time", Type: "time"}, {Name: "Value", Type: "number", Labels: map[string]string{"zone": "us-east1-b", "project_id": 
"other-project"}}, }, From ac563f6ac9f2ba751462b0925ceab5b2dcf1b0d2 Mon Sep 17 00:00:00 2001 From: Shaik-Sirajuddin Date: Mon, 30 Mar 2026 12:35:30 +0530 Subject: [PATCH 23/24] refractor(tools): rename tool_name influxdb tools --- tools/elasticsearch.go | 2 +- tools/influxdb.go | 14 +++++++------- tools/loki.go | 2 +- tools/sift.go | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/tools/elasticsearch.go b/tools/elasticsearch.go index ac7c1d35..47ad86b0 100644 --- a/tools/elasticsearch.go +++ b/tools/elasticsearch.go @@ -26,7 +26,7 @@ const ( ElasticsearchDatasourceType = "elasticsearch" ) -const elasticSearchResponseLimitBytes = 1024 * 1024 * 10 //10MB +const elasticSearchResponseLimitBytes = 1024 * 1024 * 10 // 10MB // ElasticsearchClient handles queries to an Elasticsearch datasource via Grafana proxy type ElasticsearchClient struct { diff --git a/tools/influxdb.go b/tools/influxdb.go index b549489b..87f0911c 100644 --- a/tools/influxdb.go +++ b/tools/influxdb.go @@ -526,7 +526,7 @@ func queryInflux(ctx context.Context, args InfluxQueryArgs) (*InfluxQueryResult, var QueryInflux = mcpgrafana.MustTool( "query_influx", - "Queries InfluxDB datasource, supports one of Flux, SQL, or InfluxQL query languages. Use in order: list_datasources -> get_datasource to determine query language configured for datasource.Use both list_field_keys_influxdb , list_tag_keys_influxdb to determine the available columns", + "Queries InfluxDB datasource, supports one of Flux, SQL, or InfluxQL query languages. 
Use in order: list_datasources -> get_datasource to determine query language configured for datasource.Use both list_influxdb_field_keys , list_influxdb_tag_keys to determine the available columns", queryInflux, mcp.WithTitleAnnotation("Query InfluxDB"), mcp.WithIdempotentHintAnnotation(true), @@ -723,8 +723,8 @@ func listMeasurements(ctx context.Context, args ListMeasurementsArgs) (*ListMeas } var ListMeasurements = mcpgrafana.MustTool( - "list_measurements_influxdb", - "Lists Measurements of an InfluxDB datasource identified by its UID. Use in order: list_datasources -> get_datasource -> list_influxdb_buckets (required only for Flux linked datasource) -> list_measurements_influxdb", + "list_influxdb_measurements", + "Lists Measurements of an InfluxDB datasource identified by its UID. Use in order: list_datasources -> get_datasource -> list_influxdb_buckets (required only for Flux linked datasource) -> list_influxdb_measurements", listMeasurements, mcp.WithTitleAnnotation("List Measurements InfluxDB"), mcp.WithIdempotentHintAnnotation(true), @@ -836,8 +836,8 @@ func listTagKeys(ctx context.Context, args ListTagKeysArgs) (*ListTagKeysResult, } var ListTagKeys = mcpgrafana.MustTool( - "list_tag_keys_influxdb", - "Lists Tag Keys of an InfluxDB datasource identified by its UID. Use in order: list_datasources -> get_datasource -> list_influxdb_buckets (required only for Flux linked datasource) -> list_measurements_influxdb -> list_tag_keys_influxdb", + "list_influxdb_tag_keys", + "Lists Tag Keys of an InfluxDB datasource identified by its UID. 
Use in order: list_datasources -> get_datasource -> list_influxdb_buckets (required only for Flux linked datasource) -> list_influxdb_measurements -> list_influxdb_tag_keys", listTagKeys, mcp.WithTitleAnnotation("List Tag Keys InfluxDB"), mcp.WithIdempotentHintAnnotation(true), @@ -934,8 +934,8 @@ func listFieldKeys(ctx context.Context, args ListFieldKeysArgs) (*ListFieldKeysR } var ListFieldKeys = mcpgrafana.MustTool( - "list_field_keys_influxdb", - "Lists Field Keys of an InfluxDB datasource identified by its UID. Use in order: list_datasources -> get_datasource -> list_influxdb_buckets (required only for Flux linked datasource) -> list_measurements_influxdb -> list_field_keys_influxdb", + "list_influxdb_field_keys", + "Lists Field Keys of an InfluxDB datasource identified by its UID. Use in order: list_datasources -> get_datasource -> list_influxdb_buckets (required only for Flux linked datasource) -> list_influxdb_measurements -> list_influxdb_field_keys", listFieldKeys, mcp.WithTitleAnnotation("List Field Keys InfluxDB"), mcp.WithIdempotentHintAnnotation(true), diff --git a/tools/loki.go b/tools/loki.go index 6d1404c3..c7254bb1 100644 --- a/tools/loki.go +++ b/tools/loki.go @@ -138,7 +138,7 @@ func (c *Client) makeRequest(ctx context.Context, method, urlPath string, params } // Read the response body with a limit to prevent memory issues - body := io.LimitReader(resp.Body, 1024*1024*10) //10MB limit + body := io.LimitReader(resp.Body, 1024*1024*10) // 10MB limit bodyBytes, err := io.ReadAll(body) if err != nil { return nil, fmt.Errorf("reading response body: %w", err) diff --git a/tools/sift.go b/tools/sift.go index 4499225a..440b83e5 100644 --- a/tools/sift.go +++ b/tools/sift.go @@ -28,7 +28,7 @@ const ( // errorPatternLogExampleLimit controls how many log examples are fetched per error pattern. 
const errorPatternLogExampleLimit = 3 -const siftResponseLimitBytes = 1024 * 1024 * 10 //10MB +const siftResponseLimitBytes = 1024 * 1024 * 10 // 10MB type analysisStatus string From 41d23d6ba33aec0c83facf9516093d45306e98f4 Mon Sep 17 00:00:00 2001 From: Shaik-Sirajuddin Date: Tue, 31 Mar 2026 10:32:32 +0530 Subject: [PATCH 24/24] chore(tools): escape special characters influxql --- tools/influxdb.go | 11 +++++--- tools/influxdb_unit_test.go | 55 +++++++++++++++++++++++++++++++++++-- 2 files changed, 59 insertions(+), 7 deletions(-) diff --git a/tools/influxdb.go b/tools/influxdb.go index 87f0911c..09064d32 100644 --- a/tools/influxdb.go +++ b/tools/influxdb.go @@ -524,8 +524,8 @@ func queryInflux(ctx context.Context, args InfluxQueryArgs) (*InfluxQueryResult, return &result, nil } -var QueryInflux = mcpgrafana.MustTool( - "query_influx", +var QueryInfluxDB = mcpgrafana.MustTool( + "query_influxdb", "Queries InfluxDB datasource, supports one of Flux, SQL, or InfluxQL query languages. Use in order: list_datasources -> get_datasource to determine query language configured for datasource.Use both list_influxdb_field_keys , list_influxdb_tag_keys to determine the available columns", queryInflux, mcp.WithTitleAnnotation("Query InfluxDB"), @@ -766,7 +766,10 @@ func quoteStringAsFluxLiteral(s string) string { // quoteStringAsInfluxQLIdentifier quotes a string as an InfluxQL identifier using double quotes. 
func quoteStringAsInfluxQLIdentifier(s string) string { - return `"` + strings.ReplaceAll(s, `"`, `\"`) + `"` + // Must escape backslashes FIRST, then double quotes + s = strings.ReplaceAll(s, `\`, `\\`) // \ → \\ + s = strings.ReplaceAll(s, `"`, `\"`) // " → \" + return `"` + s + `"` } func listTagKeys(ctx context.Context, args ListTagKeysArgs) (*ListTagKeysResult, error) { @@ -943,7 +946,7 @@ var ListFieldKeys = mcpgrafana.MustTool( ) func AddInfluxTools(server *server.MCPServer) { - QueryInflux.Register(server) + QueryInfluxDB.Register(server) ListBucketsInflux.Register(server) ListMeasurements.Register(server) ListTagKeys.Register(server) diff --git a/tools/influxdb_unit_test.go b/tools/influxdb_unit_test.go index b312c08c..80b4cf3c 100644 --- a/tools/influxdb_unit_test.go +++ b/tools/influxdb_unit_test.go @@ -311,9 +311,58 @@ func TestQuoting(t *testing.T) { }) t.Run("quoteStringAsInfluxQLIdentifier", func(t *testing.T) { - assert.Equal(t, `"standard"`, quoteStringAsInfluxQLIdentifier("standard")) - assert.Equal(t, `"with \"quotes\""`, quoteStringAsInfluxQLIdentifier(`with "quotes"`)) - assert.Equal(t, `"with \backslashes"`, quoteStringAsInfluxQLIdentifier(`with \backslashes`)) // backslash escaping + tests := []struct { + input string + expected string + message string + }{ + { + input: "standard", + expected: `"standard"`, + message: "plain string with no special characters should just be wrapped in double quotes", + }, + { + input: `with "quotes"`, + expected: `"with \"quotes\""`, + message: "double quotes inside string should be escaped as \"", + }, + { + input: `with \backslashes`, + expected: `"with \\backslashes"`, + message: "backslashes should be escaped as \\\\ before quote escaping", + }, + { + input: `trailing\`, + expected: `"trailing\\"`, + message: "trailing backslash must be escaped to prevent unterminated identifier bug", + }, + { + input: `slash\"quote`, + expected: `"slash\\\"quote"`, + message: "backslash immediately before a double quote 
must both be escaped independently", + }, + { + input: "", + expected: `""`, + message: "empty string should produce a valid empty identifier", + }, + { + input: `"`, + expected: `"\""`, + message: "a lone double quote should be escaped as \"", + }, + { + input: `\`, + expected: `"\\"`, + message: "a lone backslash must be escaped to prevent unterminated identifier", + }, + } + + for _, tt := range tests { + t.Run(tt.message, func(t *testing.T) { + assert.Equal(t, tt.expected, quoteStringAsInfluxQLIdentifier(tt.input), tt.message) + }) + } }) t.Run("quoteStringAsLiteral (SQL style)", func(t *testing.T) {