diff --git a/bin/unjson-dashboards/go.sum b/bin/unjson-dashboards/go.sum index 4a1211711..c78a50cc1 100644 --- a/bin/unjson-dashboards/go.sum +++ b/bin/unjson-dashboards/go.sum @@ -20,12 +20,14 @@ github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 h1:uC1QfSlInpQ github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= github.com/k0kubun/pp v3.0.1+incompatible h1:3tqvf7QgUnZ5tXO6pNAZlrvHgl6DvifjDrd9g2S9Z40= github.com/k0kubun/pp v3.0.1+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg= +github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= diff --git a/ci/it/go.sum b/ci/it/go.sum index 58d04871a..87cae7fab 100644 --- a/ci/it/go.sum +++ b/ci/it/go.sum @@ -2,26 +2,56 @@ dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= 
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/ClickHouse/ch-go v0.61.3 h1:MmBwUhXrAOBZK7n/sWBzq6FdIQ01cuF2SaaO8KlDRzI= github.com/ClickHouse/ch-go v0.61.3/go.mod h1:1PqXjMz/7S1ZUaKvwPA3i35W2bz2mAMFeCi6DIXgGwQ= +github.com/ClickHouse/clickhouse-go v1.5.4/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= github.com/ClickHouse/clickhouse-go/v2 v2.20.0 h1:bvlLQ31XJfl7MxIqAq2l1G6JhHYzqEXdvfpMeU6bkKc= github.com/ClickHouse/clickhouse-go/v2 v2.20.0/go.mod h1:VQfyA+tCwCRw2G7ogfY8V0fq/r0yJWzy8UDrjiP/Lbs= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Microsoft/hcsshim v0.11.7/go.mod h1:MV8xMfmECjl5HdO7U/3/hFVnkmSBjAjmA09d4bExKcU= github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M= github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY= +github.com/cloudflare/golz4 
v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= +github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs/v2 v2.0.0/go.mod h1:swkD/7j9HApWpzl8OHfrHNxppPd9l44DFZdF94BUj9k= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= +github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE= +github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= github.com/containerd/containerd v1.7.27 h1:yFyEyojddO3MIGVER2xJLWoCIn+Up4GaHFquP7hsFII= github.com/containerd/containerd v1.7.27/go.mod h1:xZmPnl75Vc+BLGt4MIfu6bp+fy03gdHAn9bz+FreFR0= +github.com/containerd/containerd/api v1.8.0/go.mod h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc= +github.com/containerd/continuity v0.4.4/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= +github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o= +github.com/containerd/go-cni v1.1.9/go.mod h1:XYrZJ1d5W6E2VOvjffL3IZq0Dz6bsVlERHbekNK90PM= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.1.8/go.mod h1:x6QvFIkMyO2qGIY2zXc88ivEzcbgvLdWjoZyGqDap5U= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/nri v0.8.0/go.mod h1:uSkgBrCdEtAiEz4vnrq8gmAC4EnVAM5Klt0OuK5rZYQ= github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= +github.com/containerd/typeurl v1.0.2/go.mod 
h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0= +github.com/containerd/zfs v1.1.0/go.mod h1:oZF9wBnrnQjpWLaPKEinrx3TQ9a+W/RJO7Zb41d8YLE= +github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= +github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4= +github.com/containers/ocicrypt v1.1.10/go.mod h1:YfzSSr06PTHQwSTUKqDSjish9BeW1E4HUmreluQcMd8= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E= github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -29,18 +59,25 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dmarkham/enumer v1.5.9/go.mod h1:e4VILe2b1nYK3JKJpRmNdl5xbDQvELc6tQ8b+GsGk6E= github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY= github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= 
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw= github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= github.com/go-faster/errors v0.7.1 h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AYg= github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo= +github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -48,9 +85,12 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 
h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -58,15 +98,25 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= 
+github.com/intel/goresctrl v0.5.0/go.mod h1:mIe63ggylWYr0cU/l8n11FAkesqfvuP3oktIsxvu0T0= +github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -77,18 +127,30 @@ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= +github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= +github.com/mkevac/debugcharts v0.0.0-20191222103121-ae1c48aa8615/go.mod h1:Ad7oeElCZqA1Ufj0U9/liOF4BtVepxRcTvr2ey7zTvM= github.com/moby/docker-image-spec 
v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= +github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= @@ -96,9 +158,14 @@ 
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626/go.mod h1:BRHJJd0E+cx42OybVYSgUvZmU0B8P9gZuRXlZUP7TKI= +github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= +github.com/pascaldekloe/name v1.0.1/go.mod h1:Z//MfYJnH4jVpQ9wkclwu2I2MkHmXTlT9wR5UZScttM= github.com/paulmach/orb v0.11.1 h1:3koVegMC4X/WeiXYz9iswopaTwMem53NzTJuTF20JzU= github.com/paulmach/orb v0.11.1/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -107,10 +174,17 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_model v0.3.0/go.mod 
h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= @@ -121,9 +195,12 @@ github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5g github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -131,6 +208,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= github.com/testcontainers/testcontainers-go v0.33.0 h1:zJS9PfXYT5O0ZFXM2xxXfk4J5UMw/kRiISng037Gxdw= github.com/testcontainers/testcontainers-go v0.33.0/go.mod h1:W80YpTa8D5C3Yy16icheD01UTDu+LmXIA2Keo+jWtT8= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -138,21 +217,32 @@ github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFA github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= +github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= 
github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 
h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= @@ -163,14 +253,19 @@ go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/mod v0.2.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -178,10 +273,12 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -212,11 +309,14 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 h1:1hfbdAfFbkmpg41000wDVqr7jUpK/Yo+LPnIxxGzmkg= +google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3/go.mod h1:5RBcpGRxr25RbDzY5w+dmaqpSEvl8Gwl1x2CICf60ic= google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4= google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE= google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= @@ -231,8 +331,23 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= 
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +k8s.io/api v0.26.2/go.mod h1:1kjMQsFE+QHPfskEcVNgL3+Hp88B80uj0QtSOlj8itU= +k8s.io/apimachinery v0.27.4/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E= +k8s.io/apiserver v0.26.2/go.mod h1:GHcozwXgXsPuOJ28EnQ/jXEM9QeG6HT22YxSNmpYNh8= +k8s.io/client-go v0.26.2/go.mod h1:u5EjOuSyBa09yqqyY7m3abZeovO/7D/WehVVlZ2qcqU= +k8s.io/component-base v0.26.2/go.mod h1:DxbuIe9M3IZPRxPIzhch2m1eT7uFrSBJUBuVCQEBivs= +k8s.io/cri-api v0.27.1/go.mod h1:+Ts/AVYbIo04S86XbTD73UPp/DkTiYxtsFeOFEu32L0= +k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +tags.cncf.io/container-device-interface v0.8.1/go.mod h1:Apb7N4VdILW0EVdEMRYXIDVRZfNJZ+kmEUss2kRRQ6Y= +tags.cncf.io/container-device-interface/specs-go v0.8.0/go.mod h1:BhJIkjjPh4qpys+qm4DAYtUyryaTDg9zris+AczXyws= diff --git a/cmd/experimental/go.sum b/cmd/experimental/go.sum index 93feef1a6..9cb44e940 100644 --- a/cmd/experimental/go.sum +++ b/cmd/experimental/go.sum @@ -1,38 +1,113 @@ +cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= +cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= 
+cloud.google.com/go/auth v0.9.4/go.mod h1:SHia8n6//Ya940F1rLimhJCjjx7KE17t0ctFEci3HkA= +cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= +cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.5.1/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go/iam v1.2.1/go.mod h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g= +cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= +github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/ClickHouse/ch-go v0.65.1 h1:SLuxmLl5Mjj44/XbINsK2HFvzqup0s6rwKLFH347ZhU= github.com/ClickHouse/ch-go v0.65.1/go.mod h1:bsodgURwmrkvkBe5jw1qnGDgyITsYErfONKAHn05nv4= +github.com/ClickHouse/clickhouse-go v1.5.4/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= github.com/ClickHouse/clickhouse-go/v2 v2.32.2 h1:Y8fAXt0CpLhqNXMLlSddg+cMfAr7zHBWqXLpih6ozCY= github.com/ClickHouse/clickhouse-go/v2 v2.32.2/go.mod h1:/vE8N/+9pozLkIiTMWbNUGviccDv/czEGS1KACvpXIk= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= +github.com/DataDog/appsec-internal-go v1.7.0/go.mod 
h1:wW0cRfWBo4C044jHGwYiyh5moQV2x0AhnwqMuiX7O/g= +github.com/DataDog/datadog-agent/pkg/obfuscate v0.57.0/go.mod h1:Po5HwoDd4FmT/EqgrE9x7Zz4LjxtGBSIuNY1C1lppBQ= +github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.0/go.mod h1:4Vo3SJ24uzfKHUHLoFa8t8o+LH+7TCQ7sPcZDtOpSP4= +github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= +github.com/DataDog/go-libddwaf/v3 v3.4.0/go.mod h1:n98d9nZ1gzenRSk53wz8l6d34ikxS+hs62A31Fqmyi4= github.com/DataDog/go-sqllexer v0.1.1 h1:45wV74bIqAeEGUd3VyM78I7tfarjAY/XZzklJ+FQjmk= github.com/DataDog/go-sqllexer v0.1.1/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= +github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/H0llyW00dzZ/cidr v1.2.1 h1:DfRHX+RqVVKZijQGO1aJSaWvN9Saan8sycK/4wrfY5g= github.com/H0llyW00dzZ/cidr v1.2.1/go.mod h1:S+EgYkMandSAN27mGNG/CB3jeoXDAyalsvvVFpWdnXc= +github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Shopify/toxiproxy/v2 v2.9.0/go.mod h1:2uPRyxR46fsx2yUr9i8zcejzdkWfK7p6G23jV/X6YNs= +github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= +github.com/aquarapid/vaultlib v0.5.1/go.mod h1:yT7AlEXtuabkxylOc/+Ulyp18tff1+QjgNLTnFWTlOs= 
+github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw= +github.com/aws/aws-sdk-go-v2/config v1.27.31/go.mod h1:z04nZdSWFPaDwK3DdJOG2r+scLQzMYuJeW0CujEm9FM= +github.com/aws/aws-sdk-go-v2/credentials v1.17.30/go.mod h1:BPJ/yXV92ZVq6G8uYvbU0gSl8q94UB63nMT5ctNO38g= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12/go.mod h1:fuR57fAgMk7ot3WcNQfb6rSEn+SUffl7ri+aa8uKysI= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.15/go.mod h1:0QEmQSSWMVfiAk93l1/ayR9DQ9+jwni7gHS2NARZXB0= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16/go.mod h1:2DwJF39FlNAUiX5pAc0UNeiz16lK2t7IaFcm0LFHEgc= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16/go.mod h1:7ZfEPZxkW42Afq4uQB8H2E2e6ebh6mXTueEpYzjCzcs= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.16/go.mod h1:YHk6owoSwrIsok+cAH9PENCOGoH5PU2EllX4vLtSrsY= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.18/go.mod h1:Br6+bxfG33Dk3ynmkhsW2Z/t9D4+lRqdLDNCKi85w0U= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18/go.mod h1:++NHzT+nAF7ZPrHPsA+ENvsXkOO8wEu+C6RXltAG4/c= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16/go.mod h1:Uyk1zE1VVdsHSU7096h/rwnXDzOzYQVl+FNPhPw7ShY= +github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.5/go.mod h1:ZeDX1SnKsVlejeuz41GiajjZpRSWR7/42q/EyA/QEiM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5/go.mod h1:20sz31hv/WsPa3HhU3hfrIet2kxM4Pe0r20eBZ20Tac= 
+github.com/aws/aws-sdk-go-v2/service/sts v1.30.5/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= +github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df h1:GSoSVRLoBaFpOOds6QyY1L8AX7uoY+Ln3BHc22W40X0= github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df/go.mod h1:hiVxq5OP2bUGBRNS3Z/bt/reCLFNbdcST6gISi1fiOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bndr/gotabulate v1.1.2/go.mod h1:0+8yUgaPTtLRTjf49E8oju7ojpU11YmXyvq1LbPAb3U= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU= +github.com/cpuguy83/dockercfg v0.3.1/go.mod 
h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/dave/jennifer v1.7.1/go.mod h1:nXbxhEmQfOZhWml3D1cDK5M1FLnMSozpbFN/m3RmGZc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dmarkham/enumer v1.5.10/go.mod h1:e4VILe2b1nYK3JKJpRmNdl5xbDQvELc6tQ8b+GsGk6E= +github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/ebitengine/purego v0.7.1/go.mod h1:ah1In8AOtksoNK6yk5z1HTJeUkC1Ez4Wk2idgGslMwQ= +github.com/envoyproxy/go-control-plane v0.12.1-0.20240621013728-1eb8caab5155/go.mod h1:5Wkq+JduFtdAXihLmeTJf+tRYIT4KBc2vPXDhwVo1pA= +github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.8.0 
h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw= github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= github.com/go-faster/errors v0.7.1 h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AYg= github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= @@ -44,10 +119,14 @@ github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod 
h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -55,21 +134,45 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= +github.com/google/safehtml v0.1.0/go.mod h1:L4KWwDsUJdECRAEpZoBn3O64bQaywRscowZjJAzjHnU= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/pat v0.0.0-20180118222023-199c85a7f6d1/go.mod h1:YeAe0gNeiNT5hoiZRI4yiOky6jVdNvfO2N6Kav/HmxY= github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= github.com/gorilla/securecookie 
v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= github.com/gorilla/sessions v1.4.0 h1:kpIYOp/oi6MG/p5PgxApU8srsSw9tuFbt46Lt7auzqQ= github.com/gorilla/sessions v1.4.0/go.mod h1:FLWm50oby91+hl7p/wRxDth9bWSuk0qVL2emc7lT5ik= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/hashicorp/serf v0.10.1/go.mod 
h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= +github.com/huandu/go-clone v1.7.2/go.mod h1:ReGivhG6op3GYr+UY3lS6mxjKp7MIGTknuU5TbTVaXE= +github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/ip2location/ip2location-go/v9 v9.7.1/go.mod h1:MPLnsKxwQlvd2lBNcQCsLoyzJLDBFizuO67wXXdzoyI= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= @@ -78,6 +181,10 @@ github.com/jackc/pgx/v5 v5.7.2 h1:mLoDLV6sonKlvjIEsV56SkWNCnuNv531l94GaIzO+XI= github.com/jackc/pgx/v5 v5.7.2/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 h1:uC1QfSlInpQF+M0ao65imhwqKnz3Q2z/d8PWZRMQvDM= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= github.com/k0kubun/pp v3.0.1+incompatible h1:3tqvf7QgUnZ5tXO6pNAZlrvHgl6DvifjDrd9g2S9Z40= @@ -88,6 +195,7 @@ github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCy github.com/klauspost/compress v1.13.6/go.mod 
h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= github.com/knadh/koanf/parsers/json v0.1.0 h1:dzSZl5pf5bBcW0Acnu20Djleto19T0CfHcvZ14NJ6fU= @@ -105,33 +213,77 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/krishicks/yaml-patch v0.0.10/go.mod h1:Sm5TchwZS6sm7RJoyg87tzxm2ZcKzdRE4Q7TjNhPrME= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y= +github.com/lestrrat-go/blackmagic v1.0.2/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU= +github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= +github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4= +github.com/lestrrat-go/jwx v1.2.29/go.mod h1:hU8k2l6WF0ncx20uQdOmik/Gjg6E3/wIRtXSNFeZuB8= +github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 h1:7UMa6KCCMjZEMDtTVdcGu0B1GmmC7QJKiCCjyTAWQy0= github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= 
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/markbates/going v1.0.0/go.mod h1:I6mnB4BPnEeqo85ynXIx1ZFLLbtiLHNXVgWeFO9OGOA= github.com/markbates/goth v1.80.0 h1:NnvatczZDzOs1hn9Ug+dVYf2Viwwkp/ZDX5K+GLjan8= github.com/markbates/goth v1.80.0/go.mod h1:4/GYHo+W6NWisrMPZnq0Yr2Q70UntNLn7KXEFhrIdAY= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-ieproxy v0.0.12/go.mod h1:Vn+N61199DAnVeTgaF8eoB9PvLO8P3OBnG95ENh7B7c= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/minio/minio-go v0.0.0-20190131015406-c8a261de75c1/go.mod h1:vuvdOZLJuf5HmJAJrKV64MmozrSsk+or0PB5dzdfspg= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mkevac/debugcharts v0.0.0-20191222103121-ae1c48aa8615/go.mod h1:Ad7oeElCZqA1Ufj0U9/liOF4BtVepxRcTvr2ey7zTvM= 
+github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mrjones/oauth v0.0.0-20180629183705-f4e24b6d100c/go.mod h1:skjdDftzkFALcuGzYSklqYd8gvat6F1gZJ4YPVbkZpM= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/gomega v1.23.0/go.mod 
h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opentracing-contrib/go-grpc v0.0.0-20240724223109-9dec25a38fa8/go.mod h1:z1k3YVSdAPSXtMUPS1TBWG5DaNWlT+VCbB0Qm3QJe74= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/outcaste-io/ristretto v0.2.3/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac= +github.com/pascaldekloe/name v1.0.1/go.mod h1:Z//MfYJnH4jVpQ9wkclwu2I2MkHmXTlT9wR5UZScttM= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/paulmach/orb v0.11.1 h1:3koVegMC4X/WeiXYz9iswopaTwMem53NzTJuTF20JzU= github.com/paulmach/orb v0.11.1/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= +github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= +github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pires/go-proxyproto v0.7.0/go.mod h1:Vz/1JPY/OACxWGQNIRY2BeyDmpoaWmEP40O9LbuiFR4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a/go.mod h1:GJFUzQuXIoB2Kjn1ZfDhJr/42D5nWOqRcIQVgCxTuIE= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 
h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -147,13 +299,20 @@ github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil/v3 v3.24.5 
h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= @@ -162,16 +321,28 @@ github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sjmudd/stopwatch v0.1.1/go.mod h1:BLw0oIQJ1YLXBO/q9ufK/SgnKBVIkC2qrm6uy78Zw6U= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tailscale/hujson v0.0.0-20241010212012-29efb4a0184b h1:MNaGusDfB1qxEsl6iVb33Gbe777IKzPP5PDta0xGC8M= github.com/tailscale/hujson v0.0.0-20241010212012-29efb4a0184b/go.mod h1:EbW0wDK/qEUYI0A5bqq0C2kF8JTQwWONmGDBbzsxxHo= +github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/testcontainers/testcontainers-go v0.33.0/go.mod h1:W80YpTa8D5C3Yy16icheD01UTDu+LmXIA2Keo+jWtT8= github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= @@ -183,29 +354,50 @@ github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/tinylib/msgp v1.2.1/go.mod h1:2vIGs3lcUo8izAATNobrCHevYZC/LMsJtw4JPiYPHro= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo= github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI= +github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb 
h1:Ywfo8sUltxogBpFuMOFRrrSifO788kAFxmvVw31PtQQ= github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb/go.mod h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/z-division/go-zookeeper v1.0.0/go.mod h1:6X4UioQXpvyezJJl4J9NHAJKsoffCwy5wCaaTktXjOA= +go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28= +go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E= +go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50= go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod 
h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0/go.mod h1:LqaApwGx/oUmzsbqxkzuBvyoPpkxk3JQWnqfVrJ3wCA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.2.0/go.mod h1:J0y0rp9L3xiff1+ZBfKxlC1fz2+aO16tw0tsDOixfuM= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -216,6 +408,7 @@ golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0 golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod 
h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -246,6 +439,7 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -258,24 +452,47 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU= +google.golang.org/api v0.197.0/go.mod h1:AuOuo20GoQ331nq7DquGHlU6d+2wN2fZ8O0ta60nRNw= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= +google.golang.org/grpc/examples v0.0.0-20210430044426-28078834f35b/go.mod h1:Ly7ZA/ARzg8fnPU9TyZIxoz33sEUuWX7txiqs8lPTgE= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/DataDog/dd-trace-go.v1 v1.67.1/go.mod 
h1:6DdiJPKOeJfZyd/IUGCAd5elY8qPGkztK6wbYYsMjag= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ldap.v2 v2.5.1/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/gc/v3 v3.0.0-20240801135723-a856999a2e4a/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4= +modernc.org/libc v1.60.1/go.mod h1:xJuobKuNxKH3RUatS7GjR+suWj+5c2K7bi4m/S5arOY= +modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= +modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU= +modernc.org/sqlite v1.33.1/go.mod h1:pXV2xHxhzXZsgT/RtTFAPY6JJDEvOTcTdwADQCCWD4k= +modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= vitess.io/vitess v0.21.2 h1:SbMdGngyhYurvh2KTZ92VkR5DH5taib+HH4xTkftUWU= vitess.io/vitess v0.21.2/go.mod 
h1:n37n5rmIBHYWnoPZod9umrtExlUR/9SbR3VGmanYNMU= diff --git a/cmd/go.sum b/cmd/go.sum index 93feef1a6..f2d2f05df 100644 --- a/cmd/go.sum +++ b/cmd/go.sum @@ -1,38 +1,113 @@ +cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= +cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= +cloud.google.com/go/auth v0.9.4/go.mod h1:SHia8n6//Ya940F1rLimhJCjjx7KE17t0ctFEci3HkA= +cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= +cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.5.1/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go/iam v1.2.1/go.mod h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g= +cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= +github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/ClickHouse/ch-go v0.65.1 h1:SLuxmLl5Mjj44/XbINsK2HFvzqup0s6rwKLFH347ZhU= github.com/ClickHouse/ch-go v0.65.1/go.mod h1:bsodgURwmrkvkBe5jw1qnGDgyITsYErfONKAHn05nv4= +github.com/ClickHouse/clickhouse-go v1.5.4/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= github.com/ClickHouse/clickhouse-go/v2 v2.32.2 h1:Y8fAXt0CpLhqNXMLlSddg+cMfAr7zHBWqXLpih6ozCY= github.com/ClickHouse/clickhouse-go/v2 
v2.32.2/go.mod h1:/vE8N/+9pozLkIiTMWbNUGviccDv/czEGS1KACvpXIk= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= +github.com/DataDog/appsec-internal-go v1.7.0/go.mod h1:wW0cRfWBo4C044jHGwYiyh5moQV2x0AhnwqMuiX7O/g= +github.com/DataDog/datadog-agent/pkg/obfuscate v0.57.0/go.mod h1:Po5HwoDd4FmT/EqgrE9x7Zz4LjxtGBSIuNY1C1lppBQ= +github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.0/go.mod h1:4Vo3SJ24uzfKHUHLoFa8t8o+LH+7TCQ7sPcZDtOpSP4= +github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= +github.com/DataDog/go-libddwaf/v3 v3.4.0/go.mod h1:n98d9nZ1gzenRSk53wz8l6d34ikxS+hs62A31Fqmyi4= github.com/DataDog/go-sqllexer v0.1.1 h1:45wV74bIqAeEGUd3VyM78I7tfarjAY/XZzklJ+FQjmk= github.com/DataDog/go-sqllexer v0.1.1/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= +github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/H0llyW00dzZ/cidr v1.2.1 h1:DfRHX+RqVVKZijQGO1aJSaWvN9Saan8sycK/4wrfY5g= github.com/H0llyW00dzZ/cidr v1.2.1/go.mod h1:S+EgYkMandSAN27mGNG/CB3jeoXDAyalsvvVFpWdnXc= +github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Shopify/toxiproxy/v2 v2.9.0/go.mod h1:2uPRyxR46fsx2yUr9i8zcejzdkWfK7p6G23jV/X6YNs= +github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= 
github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= +github.com/aquarapid/vaultlib v0.5.1/go.mod h1:yT7AlEXtuabkxylOc/+Ulyp18tff1+QjgNLTnFWTlOs= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw= +github.com/aws/aws-sdk-go-v2/config v1.27.31/go.mod h1:z04nZdSWFPaDwK3DdJOG2r+scLQzMYuJeW0CujEm9FM= +github.com/aws/aws-sdk-go-v2/credentials v1.17.30/go.mod h1:BPJ/yXV92ZVq6G8uYvbU0gSl8q94UB63nMT5ctNO38g= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12/go.mod h1:fuR57fAgMk7ot3WcNQfb6rSEn+SUffl7ri+aa8uKysI= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.15/go.mod h1:0QEmQSSWMVfiAk93l1/ayR9DQ9+jwni7gHS2NARZXB0= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16/go.mod h1:2DwJF39FlNAUiX5pAc0UNeiz16lK2t7IaFcm0LFHEgc= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16/go.mod h1:7ZfEPZxkW42Afq4uQB8H2E2e6ebh6mXTueEpYzjCzcs= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.16/go.mod h1:YHk6owoSwrIsok+cAH9PENCOGoH5PU2EllX4vLtSrsY= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.18/go.mod h1:Br6+bxfG33Dk3ynmkhsW2Z/t9D4+lRqdLDNCKi85w0U= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18/go.mod h1:++NHzT+nAF7ZPrHPsA+ENvsXkOO8wEu+C6RXltAG4/c= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16/go.mod h1:Uyk1zE1VVdsHSU7096h/rwnXDzOzYQVl+FNPhPw7ShY= +github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1/go.mod 
h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.5/go.mod h1:ZeDX1SnKsVlejeuz41GiajjZpRSWR7/42q/EyA/QEiM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5/go.mod h1:20sz31hv/WsPa3HhU3hfrIet2kxM4Pe0r20eBZ20Tac= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.5/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= +github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df h1:GSoSVRLoBaFpOOds6QyY1L8AX7uoY+Ln3BHc22W40X0= github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df/go.mod h1:hiVxq5OP2bUGBRNS3Z/bt/reCLFNbdcST6gISi1fiOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bndr/gotabulate v1.1.2/go.mod h1:0+8yUgaPTtLRTjf49E8oju7ojpU11YmXyvq1LbPAb3U= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod 
h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU= +github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/dave/jennifer v1.7.1/go.mod h1:nXbxhEmQfOZhWml3D1cDK5M1FLnMSozpbFN/m3RmGZc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dmarkham/enumer v1.5.10/go.mod h1:e4VILe2b1nYK3JKJpRmNdl5xbDQvELc6tQ8b+GsGk6E= +github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/ebitengine/purego v0.7.1/go.mod h1:ah1In8AOtksoNK6yk5z1HTJeUkC1Ez4Wk2idgGslMwQ= +github.com/envoyproxy/go-control-plane v0.12.1-0.20240621013728-1eb8caab5155/go.mod h1:5Wkq+JduFtdAXihLmeTJf+tRYIT4KBc2vPXDhwVo1pA= +github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/evanphx/json-patch v5.9.0+incompatible/go.mod 
h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw= github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= github.com/go-faster/errors v0.7.1 h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AYg= github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= @@ -44,10 +119,14 @@ github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -55,21 +134,45 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= +github.com/google/safehtml v0.1.0/go.mod h1:L4KWwDsUJdECRAEpZoBn3O64bQaywRscowZjJAzjHnU= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod 
h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/pat v0.0.0-20180118222023-199c85a7f6d1/go.mod h1:YeAe0gNeiNT5hoiZRI4yiOky6jVdNvfO2N6Kav/HmxY= github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= github.com/gorilla/sessions v1.4.0 h1:kpIYOp/oi6MG/p5PgxApU8srsSw9tuFbt46Lt7auzqQ= github.com/gorilla/sessions v1.4.0/go.mod h1:FLWm50oby91+hl7p/wRxDth9bWSuk0qVL2emc7lT5ik= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.7 
h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= +github.com/huandu/go-clone v1.7.2/go.mod h1:ReGivhG6op3GYr+UY3lS6mxjKp7MIGTknuU5TbTVaXE= +github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/ip2location/ip2location-go/v9 v9.7.1/go.mod h1:MPLnsKxwQlvd2lBNcQCsLoyzJLDBFizuO67wXXdzoyI= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= @@ -78,6 +181,10 @@ github.com/jackc/pgx/v5 v5.7.2 h1:mLoDLV6sonKlvjIEsV56SkWNCnuNv531l94GaIzO+XI= github.com/jackc/pgx/v5 v5.7.2/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 h1:uC1QfSlInpQF+M0ao65imhwqKnz3Q2z/d8PWZRMQvDM= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod 
h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= github.com/k0kubun/pp v3.0.1+incompatible h1:3tqvf7QgUnZ5tXO6pNAZlrvHgl6DvifjDrd9g2S9Z40= @@ -88,6 +195,7 @@ github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCy github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= github.com/knadh/koanf/parsers/json v0.1.0 h1:dzSZl5pf5bBcW0Acnu20Djleto19T0CfHcvZ14NJ6fU= @@ -105,33 +213,77 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/krishicks/yaml-patch v0.0.10/go.mod h1:Sm5TchwZS6sm7RJoyg87tzxm2ZcKzdRE4Q7TjNhPrME= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y= +github.com/lestrrat-go/blackmagic v1.0.2/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU= +github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= +github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4= +github.com/lestrrat-go/jwx v1.2.29/go.mod h1:hU8k2l6WF0ncx20uQdOmik/Gjg6E3/wIRtXSNFeZuB8= +github.com/lestrrat-go/option v1.0.1/go.mod 
h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 h1:7UMa6KCCMjZEMDtTVdcGu0B1GmmC7QJKiCCjyTAWQy0= github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/markbates/going v1.0.0/go.mod h1:I6mnB4BPnEeqo85ynXIx1ZFLLbtiLHNXVgWeFO9OGOA= github.com/markbates/goth v1.80.0 h1:NnvatczZDzOs1hn9Ug+dVYf2Viwwkp/ZDX5K+GLjan8= github.com/markbates/goth v1.80.0/go.mod h1:4/GYHo+W6NWisrMPZnq0Yr2Q70UntNLn7KXEFhrIdAY= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-ieproxy v0.0.12/go.mod h1:Vn+N61199DAnVeTgaF8eoB9PvLO8P3OBnG95ENh7B7c= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/minio/minio-go v0.0.0-20190131015406-c8a261de75c1/go.mod h1:vuvdOZLJuf5HmJAJrKV64MmozrSsk+or0PB5dzdfspg= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 
h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mkevac/debugcharts v0.0.0-20191222103121-ae1c48aa8615/go.mod h1:Ad7oeElCZqA1Ufj0U9/liOF4BtVepxRcTvr2ey7zTvM= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mrjones/oauth v0.0.0-20180629183705-f4e24b6d100c/go.mod h1:skjdDftzkFALcuGzYSklqYd8gvat6F1gZJ4YPVbkZpM= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249/go.mod 
h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opentracing-contrib/go-grpc v0.0.0-20240724223109-9dec25a38fa8/go.mod h1:z1k3YVSdAPSXtMUPS1TBWG5DaNWlT+VCbB0Qm3QJe74= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/outcaste-io/ristretto v0.2.3/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac= +github.com/pascaldekloe/name v1.0.1/go.mod h1:Z//MfYJnH4jVpQ9wkclwu2I2MkHmXTlT9wR5UZScttM= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/paulmach/orb v0.11.1 h1:3koVegMC4X/WeiXYz9iswopaTwMem53NzTJuTF20JzU= github.com/paulmach/orb v0.11.1/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= +github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= +github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pires/go-proxyproto v0.7.0/go.mod h1:Vz/1JPY/OACxWGQNIRY2BeyDmpoaWmEP40O9LbuiFR4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a/go.mod h1:GJFUzQuXIoB2Kjn1ZfDhJr/42D5nWOqRcIQVgCxTuIE= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -147,13 +299,20 @@ github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= github.com/segmentio/asm v1.2.0 
h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= @@ -162,16 +321,29 @@ github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sjmudd/stopwatch v0.1.1/go.mod h1:BLw0oIQJ1YLXBO/q9ufK/SgnKBVIkC2qrm6uy78Zw6U= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.5.2/go.mod 
h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tailscale/hujson v0.0.0-20241010212012-29efb4a0184b h1:MNaGusDfB1qxEsl6iVb33Gbe777IKzPP5PDta0xGC8M= github.com/tailscale/hujson v0.0.0-20241010212012-29efb4a0184b/go.mod h1:EbW0wDK/qEUYI0A5bqq0C2kF8JTQwWONmGDBbzsxxHo= +github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/testcontainers/testcontainers-go v0.33.0/go.mod h1:W80YpTa8D5C3Yy16icheD01UTDu+LmXIA2Keo+jWtT8= github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= @@ -183,29 +355,50 @@ github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/tinylib/msgp v1.2.1/go.mod h1:2vIGs3lcUo8izAATNobrCHevYZC/LMsJtw4JPiYPHro= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo= 
github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI= +github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb h1:Ywfo8sUltxogBpFuMOFRrrSifO788kAFxmvVw31PtQQ= github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb/go.mod h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/z-division/go-zookeeper v1.0.0/go.mod h1:6X4UioQXpvyezJJl4J9NHAJKsoffCwy5wCaaTktXjOA= +go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28= +go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod 
h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E= +go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50= go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0/go.mod h1:LqaApwGx/oUmzsbqxkzuBvyoPpkxk3JQWnqfVrJ3wCA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.2.0/go.mod h1:J0y0rp9L3xiff1+ZBfKxlC1fz2+aO16tw0tsDOixfuM= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -216,6 +409,7 @@ golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0 golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -246,6 +440,7 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -258,24 +453,47 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU= +google.golang.org/api v0.197.0/go.mod h1:AuOuo20GoQ331nq7DquGHlU6d+2wN2fZ8O0ta60nRNw= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= +google.golang.org/grpc/examples v0.0.0-20210430044426-28078834f35b/go.mod h1:Ly7ZA/ARzg8fnPU9TyZIxoz33sEUuWX7txiqs8lPTgE= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.27.1/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/DataDog/dd-trace-go.v1 v1.67.1/go.mod h1:6DdiJPKOeJfZyd/IUGCAd5elY8qPGkztK6wbYYsMjag= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ldap.v2 v2.5.1/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/gc/v3 v3.0.0-20240801135723-a856999a2e4a/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4= +modernc.org/libc v1.60.1/go.mod h1:xJuobKuNxKH3RUatS7GjR+suWj+5c2K7bi4m/S5arOY= +modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= +modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU= +modernc.org/sqlite v1.33.1/go.mod h1:pXV2xHxhzXZsgT/RtTFAPY6JJDEvOTcTdwADQCCWD4k= +modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= +modernc.org/token v1.1.0/go.mod 
h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= vitess.io/vitess v0.21.2 h1:SbMdGngyhYurvh2KTZ92VkR5DH5taib+HH4xTkftUWU= vitess.io/vitess v0.21.2/go.mod h1:n37n5rmIBHYWnoPZod9umrtExlUR/9SbR3VGmanYNMU= diff --git a/platform/frontend_connectors/search_common_table_test.go b/platform/frontend_connectors/search_common_table_test.go index 96e7f3108..8326e71bd 100644 --- a/platform/frontend_connectors/search_common_table_test.go +++ b/platform/frontend_connectors/search_common_table_test.go @@ -29,7 +29,6 @@ func TestSearchCommonTable(t *testing.T) { Rows []*sqlmock.Rows IndexPattern string }{ - { // [0] Name: "query non virtual table", IndexPattern: "logs-3", @@ -43,7 +42,7 @@ func TestSearchCommonTable(t *testing.T) { } }`, WantedSql: []string{ - `SELECT "@timestamp", "message" FROM "logs-3" LIMIT 10`, + "SELECT `@timestamp`, `message` FROM `logs-3` LIMIT 10", }, }, @@ -60,7 +59,7 @@ func TestSearchCommonTable(t *testing.T) { } }`, WantedSql: []string{ - `SELECT "@timestamp", "message" FROM quesma_common_table WHERE "__quesma_index_name"='logs-1' LIMIT 10`, + "SELECT `@timestamp`, `message` FROM `quesma_common_table` WHERE `__quesma_index_name`='logs-1' LIMIT 10", }, }, @@ -77,7 +76,7 @@ func TestSearchCommonTable(t *testing.T) { } }`, WantedSql: []string{ - `SELECT "@timestamp", "message", "__quesma_index_name" FROM quesma_common_table WHERE ("__quesma_index_name"='logs-1' OR "__quesma_index_name"='logs-2') LIMIT 10`, + "SELECT `@timestamp`, `message`, `__quesma_index_name` FROM `quesma_common_table` WHERE (`__quesma_index_name`='logs-1' OR `__quesma_index_name`='logs-2') LIMIT 10", }, }, @@ -94,7 +93,7 @@ func TestSearchCommonTable(t *testing.T) { } }`, WantedSql: []string{ - `SELECT "@timestamp", "message", "__quesma_index_name" FROM quesma_common_table WHERE ("__quesma_index_name"='logs-1' OR "__quesma_index_name"='logs-2') LIMIT 10`, + "SELECT `@timestamp`, `message`, 
`__quesma_index_name` FROM `quesma_common_table` WHERE (`__quesma_index_name`='logs-1' OR `__quesma_index_name`='logs-2') LIMIT 10", }, }, @@ -111,7 +110,7 @@ func TestSearchCommonTable(t *testing.T) { } }`, WantedSql: []string{ - `SELECT "@timestamp", "message", "__quesma_index_name" FROM quesma_common_table WHERE startsWith("__quesma_index_name",'daily-') LIMIT 10`, + "SELECT `@timestamp`, `message`, `__quesma_index_name` FROM `quesma_common_table` WHERE startsWith(`__quesma_index_name`,'daily-') LIMIT 10", }, }, @@ -128,7 +127,7 @@ func TestSearchCommonTable(t *testing.T) { } }`, WantedSql: []string{ - `SELECT "@timestamp", "message", "__quesma_index_name" FROM quesma_common_table WHERE ("__quesma_index_name"='logs-1' OR "__quesma_index_name"='logs-2') LIMIT 10`, + "SELECT `@timestamp`, `message`, `__quesma_index_name` FROM `quesma_common_table` WHERE (`__quesma_index_name`='logs-1' OR `__quesma_index_name`='logs-2') LIMIT 10", }, }, @@ -145,7 +144,7 @@ func TestSearchCommonTable(t *testing.T) { } }`, WantedSql: []string{ - `SELECT "@timestamp", "message", "__quesma_index_name" FROM quesma_common_table WHERE ("__quesma_index_name"='logs-1' OR "__quesma_index_name"='logs-2') LIMIT 10`, + "SELECT `@timestamp`, `message`, `__quesma_index_name` FROM `quesma_common_table` WHERE (`__quesma_index_name`='logs-1' OR `__quesma_index_name`='logs-2') LIMIT 10", }, }, @@ -185,8 +184,8 @@ func TestSearchCommonTable(t *testing.T) { } }`, WantedSql: []string{ - `SELECT countIf("@timestamp"=toInt64(toUnixTimestamp(toStartOfDay(subDate(now(), INTERVAL 3 week)))) AND "@timestamp"=toInt64(toUnixTimestamp('2024-04-14'))) AS "range_2__aggr__2__count" FROM quesma_common_table WHERE ("__quesma_index_name"='logs-1' OR "__quesma_index_name"='logs-2') -- optimizations: pancake(half)`, - `SELECT "@timestamp", "message", "__quesma_index_name" FROM quesma_common_table WHERE ("__quesma_index_name"='logs-1' OR "__quesma_index_name"='logs-2') LIMIT 10`, + "SELECT 
countIf(`@timestamp`=toInt64(toUnixTimestamp(toStartOfDay(subDate(now(), INTERVAL 3 week)))) AND `@timestamp`=toInt64(toUnixTimestamp('2024-04-14'))) AS `range_2__aggr__2__count` FROM `quesma_common_table` WHERE (`__quesma_index_name`='logs-1' OR `__quesma_index_name`='logs-2') -- optimizations: pancake(half)", + "SELECT `@timestamp`, `message`, `__quesma_index_name` FROM `quesma_common_table` WHERE (`__quesma_index_name`='logs-1' OR `__quesma_index_name`='logs-2') LIMIT 10", }, // we need to return some rows, otherwise pancakes will fail Rows: []*sqlmock.Rows{ diff --git a/platform/frontend_connectors/search_test.go b/platform/frontend_connectors/search_test.go index b7cfad782..038e62956 100644 --- a/platform/frontend_connectors/search_test.go +++ b/platform/frontend_connectors/search_test.go @@ -289,11 +289,15 @@ func TestSearchHandler(t *testing.T) { var selectColumns []string for k := range tab.Cols { - selectColumns = append(selectColumns, strconv.Quote(k)) + selectColumns = append(selectColumns, k) } sort.Strings(selectColumns) - testSuiteSelectPlaceHolder := "SELECT \"message\"" + for i := range selectColumns { + selectColumns[i] = util.BackquoteIdentifier(selectColumns[i]) + } + + testSuiteSelectPlaceHolder := "SELECT `message`" selectCMD := fmt.Sprintf("SELECT %s ", strings.Join(selectColumns, ", ")) s := &schema.StaticRegistry{ @@ -322,7 +326,7 @@ func TestSearchHandler(t *testing.T) { // So we should have a different expectation here. // HACK. we change expectations here - wantedRegex = strings.ReplaceAll(wantedRegex, model.FullTextFieldNamePlaceHolder, "message") + wantedRegex = strings.ReplaceAll(wantedRegex, model.FullTextFieldNamePlaceHolder, "`message`") if tt.WantedQueryType == model.ListAllFields && strings.HasPrefix(wantedRegex, testSuiteSelectPlaceHolder) { @@ -330,8 +334,9 @@ func TestSearchHandler(t *testing.T) { } - mock.ExpectQuery(testdata.EscapeWildcard(testdata.EscapeBrackets(wantedRegex))). 
- WillReturnRows(sqlmock.NewRows([]string{"@timestamp", "host.name"})) + ss := testdata.EscapeWildcard(testdata.EscapeBrackets(wantedRegex)) + mock.ExpectQuery(ss). + WillReturnRows(sqlmock.NewRows([]string{"@timestamp", "host_name"})) } } else { for _, query := range tt.WantedQueries { @@ -584,38 +589,38 @@ func TestHandlingDateTimeFields(t *testing.T) { }` } expectedSelectStatement := map[string]string{ - dateTimeTimestampField: `SELECT toInt64(toUnixTimestamp("timestamp") / 60) AS "aggr__0__key_0", - count(*) AS "aggr__0__count" - FROM __quesma_table_name - WHERE ((("timestamp64">=fromUnixTimestamp64Milli(1706542596491) AND - "timestamp64"<=fromUnixTimestamp64Milli(1706551896491)) AND ("timestamp">= - fromUnixTimestamp(1706542596) AND "timestamp"<=fromUnixTimestamp(1706551896))) - AND NOT (("@timestamp">=fromUnixTimestamp64Milli(1706542596491) AND - "@timestamp"<=fromUnixTimestamp64Milli(1706551896491)))) - GROUP BY toInt64(toUnixTimestamp("timestamp") / 60) AS "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, - dateTime64TimestampField: `SELECT toInt64(toUnixTimestamp64Milli("timestamp64") / 60000) AS - "aggr__0__key_0", count(*) AS "aggr__0__count" - FROM __quesma_table_name - WHERE ((("timestamp64">=fromUnixTimestamp64Milli(1706542596491) AND - "timestamp64"<=fromUnixTimestamp64Milli(1706551896491)) AND ("timestamp">= - fromUnixTimestamp(1706542596) AND "timestamp"<=fromUnixTimestamp(1706551896))) - AND NOT (("@timestamp">=fromUnixTimestamp64Milli(1706542596491) AND - "@timestamp"<=fromUnixTimestamp64Milli(1706551896491)))) - GROUP BY toInt64(toUnixTimestamp64Milli("timestamp64") / 60000) AS - "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, - dateTime64OurTimestampField: `SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 60000) AS "aggr__0__key_0" - , count(*) AS "aggr__0__count" - FROM __quesma_table_name - WHERE ((("timestamp64">=fromUnixTimestamp64Milli(1706542596491) AND - "timestamp64"<=fromUnixTimestamp64Milli(1706551896491)) AND ("timestamp">= 
- fromUnixTimestamp(1706542596) AND "timestamp"<=fromUnixTimestamp(1706551896))) - AND NOT (("@timestamp">=fromUnixTimestamp64Milli(1706542596491) AND - "@timestamp"<=fromUnixTimestamp64Milli(1706551896491)))) - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 60000) AS - "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, + dateTimeTimestampField: "SELECT toInt64(toUnixTimestamp(`timestamp`) / 60) AS `aggr__0__key_0`,\n" + + " count(*) AS `aggr__0__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (((`timestamp64`>=fromUnixTimestamp64Milli(1706542596491) AND\n" + + " `timestamp64`<=fromUnixTimestamp64Milli(1706551896491)) AND (`timestamp`>=\n" + + " fromUnixTimestamp(1706542596) AND `timestamp`<=fromUnixTimestamp(1706551896)))\n" + + " AND NOT ((`@timestamp`>=fromUnixTimestamp64Milli(1706542596491) AND\n" + + " `@timestamp`<=fromUnixTimestamp64Milli(1706551896491)))) \n" + + "GROUP BY toInt64(toUnixTimestamp(`timestamp`) / 60) AS `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__key_0` ASC", + dateTime64TimestampField: "SELECT toInt64(toUnixTimestamp64Milli(`timestamp64`) / 60000) AS\n" + + " `aggr__0__key_0`, count(*) AS `aggr__0__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (((`timestamp64`>=fromUnixTimestamp64Milli(1706542596491) AND\n" + + " `timestamp64`<=fromUnixTimestamp64Milli(1706551896491)) AND (`timestamp`>=\n" + + " fromUnixTimestamp(1706542596) AND `timestamp`<=fromUnixTimestamp(1706551896)))\n" + + " AND NOT ((`@timestamp`>=fromUnixTimestamp64Milli(1706542596491) AND\n" + + " `@timestamp`<=fromUnixTimestamp64Milli(1706551896491))))\n" + + "GROUP BY toInt64(toUnixTimestamp64Milli(`timestamp64`) / 60000) AS\n" + + " `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__key_0` ASC", + dateTime64OurTimestampField: "SELECT toInt64(toUnixTimestamp64Milli(`@timestamp`) / 60000) AS `aggr__0__key_0`\n" + + " , count(*) AS `aggr__0__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (((`timestamp64`>=fromUnixTimestamp64Milli(1706542596491) AND\n" + 
+ " `timestamp64`<=fromUnixTimestamp64Milli(1706551896491)) AND (`timestamp`>=\n" + + " fromUnixTimestamp(1706542596) AND `timestamp`<=fromUnixTimestamp(1706551896)))\n" + + " AND NOT ((`@timestamp`>=fromUnixTimestamp64Milli(1706542596491) AND\n" + + " `@timestamp`<=fromUnixTimestamp64Milli(1706551896491))))\n" + + "GROUP BY toInt64(toUnixTimestamp64Milli(`@timestamp`) / 60000) AS\n" + + " `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__key_0` ASC", } conn, mock := util.InitSqlMockWithPrettySqlAndPrint(t, false) @@ -1001,7 +1006,7 @@ func TestSearchAfterParameter_sortByJustTimestamp(t *testing.T) { }{ { request: `{"size": 3, "track_total_hits": false, "sort": [{"@timestamp": {"order": "desc"}}]}`, - expectedSQL: `SELECT "@timestamp", "message" FROM __quesma_table_name ORDER BY "@timestamp" DESC LIMIT 3`, + expectedSQL: "SELECT `@timestamp`, `message` FROM `__quesma_table_name` ORDER BY `@timestamp` DESC LIMIT 3", resultRowsFromDB: [][]any{{someTime, "m1"}, {someTime, "m2"}, {someTime, "m3"}}, basicAndFastSortFieldPerHit: []int64{someTime.UnixMilli(), someTime.UnixMilli(), someTime.UnixMilli()}, expectedSortValuesCount: 1, @@ -1017,21 +1022,21 @@ func TestSearchAfterParameter_sortByJustTimestamp(t *testing.T) { {"_doc": {"unmapped_type": "boolean", "order": "desc"}} ] }`, - expectedSQL: `SELECT "@timestamp", "message" FROM __quesma_table_name WHERE fromUnixTimestamp64Milli(1706551896491)>"@timestamp" ORDER BY "@timestamp" DESC LIMIT 3`, + expectedSQL: "SELECT `@timestamp`, `message` FROM `__quesma_table_name` WHERE fromUnixTimestamp64Milli(1706551896491)>`@timestamp` ORDER BY `@timestamp` DESC LIMIT 3", resultRowsFromDB: [][]any{{sub(1), "m8"}, {sub(2), "m9"}, {sub(3), "m10"}}, basicAndFastSortFieldPerHit: []int64{sub(1).UnixMilli(), sub(2).UnixMilli(), sub(3).UnixMilli()}, expectedSortValuesCount: 2, }, { request: `{"search_after": [1706551896488], "size": 3, "track_total_hits": false, "sort": [{"@timestamp": {"order": "desc"}}]}`, - expectedSQL: `SELECT "@timestamp", 
"message" FROM __quesma_table_name WHERE fromUnixTimestamp64Milli(1706551896488)>"@timestamp" ORDER BY "@timestamp" DESC LIMIT 3`, + expectedSQL: "SELECT `@timestamp`, `message` FROM `__quesma_table_name` WHERE fromUnixTimestamp64Milli(1706551896488)>`@timestamp` ORDER BY `@timestamp` DESC LIMIT 3", resultRowsFromDB: [][]any{{sub(4), "m11"}, {sub(5), "m12"}, {sub(6), "m13"}}, basicAndFastSortFieldPerHit: []int64{sub(4).UnixMilli(), sub(5).UnixMilli(), sub(6).UnixMilli()}, expectedSortValuesCount: 1, }, { request: `{"search_after": [1706551896485], "size": 3, "track_total_hits": false, "sort": [{"@timestamp": {"order": "desc"}}]}`, - expectedSQL: `SELECT "@timestamp", "message" FROM __quesma_table_name WHERE fromUnixTimestamp64Milli(1706551896485)>"@timestamp" ORDER BY "@timestamp" DESC LIMIT 3`, + expectedSQL: "SELECT `@timestamp`, `message` FROM `__quesma_table_name` WHERE fromUnixTimestamp64Milli(1706551896485)>`@timestamp` ORDER BY `@timestamp` DESC LIMIT 3", resultRowsFromDB: [][]any{{sub(7), "m14"}, {sub(8), "m15"}, {sub(9), "m16"}}, basicAndFastSortFieldPerHit: []int64{sub(7).UnixMilli(), sub(8).UnixMilli(), sub(9).UnixMilli()}, expectedSortValuesCount: 1, @@ -1131,22 +1136,22 @@ func TestSearchAfterParameter_sortByJustOneStringField(t *testing.T) { }{ { request: `{"size": 3, "track_total_hits": false, "sort": [{"message": {"order": "asc"}}]}`, - expectedSQL: `SELECT "message" FROM __quesma_table_name ORDER BY "message" ASC LIMIT 3`, + expectedSQL: "SELECT `message` FROM `__quesma_table_name` ORDER BY `message` ASC LIMIT 3", resultRowsFromDB: []any{"m1", "m1", "m1"}, }, { request: `{"search_after": ["m1"], "size": 3, "track_total_hits": false, "sort": [{"message": {"order": "asc"}}]}`, - expectedSQL: `SELECT "message" FROM __quesma_table_name WHERE "message">'m1' ORDER BY "message" ASC LIMIT 3`, + expectedSQL: "SELECT `message` FROM `__quesma_table_name` WHERE `message`>'m1' ORDER BY `message` ASC LIMIT 3", resultRowsFromDB: []any{"m2", "m2", "m3"}, }, { 
request: `{"search_after": ["m3"], "size": 3, "track_total_hits": false, "sort": [{"message": {"order": "asc"}}]}`, - expectedSQL: `SELECT "message" FROM __quesma_table_name WHERE "message">'m3' ORDER BY "message" ASC LIMIT 3`, + expectedSQL: "SELECT `message` FROM `__quesma_table_name` WHERE `message`>'m3' ORDER BY `message` ASC LIMIT 3", resultRowsFromDB: []any{"m4", "m5", "m6"}, }, { request: `{"search_after": ["m6"], "size": 3, "track_total_hits": false, "sort": [{"message": {"order": "asc"}}]}`, - expectedSQL: `SELECT "message" FROM __quesma_table_name WHERE "message">'m6' ORDER BY "message" ASC LIMIT 3`, + expectedSQL: "SELECT `message` FROM `__quesma_table_name` WHERE `message`>'m6' ORDER BY `message` ASC LIMIT 3", resultRowsFromDB: []any{"m7", "m8", "m9"}, }, } @@ -1254,7 +1259,7 @@ func TestSearchAfterParameter_sortByMultipleFields(t *testing.T) { }{ { request: `{"size": 3, "track_total_hits": false, "sort": [{"@timestamp": {"order": "desc"}}, {"message": {"order": "asc"}}, {"bicep_size": {"order": "desc"}}]}`, - expectedSQL: `SELECT "@timestamp", "bicep_size", "message" FROM __quesma_table_name ORDER BY "@timestamp" DESC, "message" ASC, "bicep_size" DESC LIMIT 3`, + expectedSQL: "SELECT `@timestamp`, `bicep_size`, `message` FROM `__quesma_table_name` ORDER BY `@timestamp` DESC, `message` ASC, `bicep_size` DESC LIMIT 3", resultRowsFromDB: [][]any{ {someTime, int64(1), "m1"}, {someTime, int64(2), "m2"}, @@ -1268,7 +1273,7 @@ func TestSearchAfterParameter_sortByMultipleFields(t *testing.T) { }, { request: `{"search_after": [1706551896491, "m3", 3], "size": 3, "track_total_hits": false, "sort": [{"@timestamp": {"order": "desc"}}, {"message": {"order": "asc"}}, {"bicep_size": {"order": "desc"}}]}`, - expectedSQL: `SELECT "@timestamp", "bicep_size", "message" FROM __quesma_table_name WHERE tuple(fromUnixTimestamp64Milli(1706551896491), "message", 3)>tuple("@timestamp", 'm3', "bicep_size") ORDER BY "@timestamp" DESC, "message" ASC, "bicep_size" DESC LIMIT 3`, + 
expectedSQL: "SELECT `@timestamp`, `bicep_size`, `message` FROM `__quesma_table_name` WHERE tuple(fromUnixTimestamp64Milli(1706551896491), `message`, 3)>tuple(`@timestamp`, 'm3', `bicep_size`) ORDER BY `@timestamp` DESC, `message` ASC, `bicep_size` DESC LIMIT 3", resultRowsFromDB: [][]any{ {someTime, int64(4), "m4"}, {someTime, int64(5), "m5"}, @@ -1282,7 +1287,7 @@ func TestSearchAfterParameter_sortByMultipleFields(t *testing.T) { }, { request: `{"search_after": [1706551896491, "m5", 0], "size": 3, "track_total_hits": false, "sort": [{"@timestamp": {"order": "desc"}}, {"message": {"order": "asc"}}, {"bicep_size": {"order": "desc"}}]}`, - expectedSQL: `SELECT "@timestamp", "bicep_size", "message" FROM __quesma_table_name WHERE tuple(fromUnixTimestamp64Milli(1706551896491), "message", 0)>tuple("@timestamp", 'm5', "bicep_size") ORDER BY "@timestamp" DESC, "message" ASC, "bicep_size" DESC LIMIT 3`, + expectedSQL: "SELECT `@timestamp`, `bicep_size`, `message` FROM `__quesma_table_name` WHERE tuple(fromUnixTimestamp64Milli(1706551896491), `message`, 0)>tuple(`@timestamp`, 'm5', `bicep_size`) ORDER BY `@timestamp` DESC, `message` ASC, `bicep_size` DESC LIMIT 3", resultRowsFromDB: [][]any{ {sub(1), int64(0), "m6"}, {sub(1), int64(0), "m7"}, @@ -1296,7 +1301,7 @@ func TestSearchAfterParameter_sortByMultipleFields(t *testing.T) { }, { request: `{"search_after": [1706551896491, "m8", 0], "size": 3, "track_total_hits": false, "sort": [{"@timestamp": {"order": "desc"}}, {"message": {"order": "asc"}}, {"bicep_size": {"order": "desc"}}]}`, - expectedSQL: `SELECT "@timestamp", "bicep_size", "message" FROM __quesma_table_name WHERE tuple(fromUnixTimestamp64Milli(1706551896491), "message", 0)>tuple("@timestamp", 'm8', "bicep_size") ORDER BY "@timestamp" DESC, "message" ASC, "bicep_size" DESC LIMIT 3`, + expectedSQL: "SELECT `@timestamp`, `bicep_size`, `message` FROM `__quesma_table_name` WHERE tuple(fromUnixTimestamp64Milli(1706551896491), `message`, 0)>tuple(`@timestamp`, 'm8', 
`bicep_size`) ORDER BY `@timestamp` DESC, `message` ASC, `bicep_size` DESC LIMIT 3", resultRowsFromDB: [][]any{ {sub(1), int64(0), "m9"}, {sub(2), int64(0), "m10"}, @@ -1407,7 +1412,7 @@ func TestSearchAfterParameter_sortByNoField(t *testing.T) { }{ { request: `{"size": 3, "track_total_hits": false, "sort": [{"_score": {"order": "desc"}}]}`, - expectedSQL: `SELECT "@timestamp", "bicep_size", "message" FROM __quesma_table_name LIMIT 3`, + expectedSQL: "SELECT `@timestamp`, `bicep_size`, `message` FROM `__quesma_table_name` LIMIT 3", resultRowsFromDB: [][]any{ {someTime, int64(1), "m1"}, {someTime, int64(2), "m2"}, diff --git a/platform/frontend_connectors/terms_enum_test.go b/platform/frontend_connectors/terms_enum_test.go index 08ee6e267..55fec3886 100644 --- a/platform/frontend_connectors/terms_enum_test.go +++ b/platform/frontend_connectors/terms_enum_test.go @@ -68,7 +68,7 @@ var rawRequestBody = []byte(`{ } }`) -func testHandleTermsEnumRequest(t *testing.T, requestBody []byte, fieldName string) { +func testHandleTermsEnumRequest(t *testing.T, requestBody []byte, fieldName string, isContainsFunctionName bool) { table := &database_common.Table{ Name: testTableName, Config: database_common.NewDefaultCHConfig(), @@ -127,10 +127,16 @@ func testHandleTermsEnumRequest(t *testing.T, requestBody []byte, fieldName stri } ctx = context.WithValue(context.Background(), tracing.RequestIdCtxKey, "test") qt := &elastic_query_dsl.ClickhouseQueryTranslator{Table: table, Ctx: ctx, Schema: s.Tables[schema.IndexName(testTableName)]} - // Here we additionally verify that terms for `_tier` are **NOT** included in the SQL query - expectedQuery1 := fmt.Sprintf(`SELECT DISTINCT %s FROM %s WHERE (("epoch_time">=fromUnixTimestamp(1709036700) AND "epoch_time"<=fromUnixTimestamp(1709037659)) AND ("epoch_time_datetime64">=fromUnixTimestamp64Milli(1709036700000) AND "epoch_time_datetime64"<=fromUnixTimestamp64Milli(1709037659999))) LIMIT 13`, fieldName, testTableName) - expectedQuery2 := 
fmt.Sprintf(`SELECT DISTINCT %s FROM %s WHERE (("epoch_time">=fromUnixTimestamp(1709036700) AND "epoch_time"<=fromUnixTimestamp(1709037659)) AND ("epoch_time_datetime64">=fromUnixTimestamp64Milli(1709036700000) AND "epoch_time_datetime64"<=fromUnixTimestamp64Milli(1709037659999))) LIMIT 13`, fieldName, testTableName) - + var expectedQuery1 string + var expectedQuery2 string + if isContainsFunctionName { + expectedQuery1 = fmt.Sprintf("SELECT DISTINCT %s FROM `%s` WHERE ((`epoch_time`>=fromUnixTimestamp(1709036700) AND `epoch_time`<=fromUnixTimestamp(1709037659)) AND (`epoch_time_datetime64`>=fromUnixTimestamp64Milli(1709036700000) AND `epoch_time_datetime64`<=fromUnixTimestamp64Milli(1709037659999))) LIMIT 13", fieldName, testTableName) + expectedQuery2 = fmt.Sprintf("SELECT DISTINCT %s FROM `%s` WHERE ((`epoch_time`>=fromUnixTimestamp(1709036700) AND `epoch_time`<=fromUnixTimestamp(1709037659)) AND (`epoch_time_datetime64`>=fromUnixTimestamp64Milli(1709036700000) AND `epoch_time_datetime64`<=fromUnixTimestamp64Milli(1709037659999))) LIMIT 13", fieldName, testTableName) + } else { + // Here we additionally verify that terms for `_tier` are **NOT** included in the SQL query + expectedQuery1 = fmt.Sprintf("SELECT DISTINCT `%s` FROM `%s` WHERE ((`epoch_time`>=fromUnixTimestamp(1709036700) AND `epoch_time`<=fromUnixTimestamp(1709037659)) AND (`epoch_time_datetime64`>=fromUnixTimestamp64Milli(1709036700000) AND `epoch_time_datetime64`<=fromUnixTimestamp64Milli(1709037659999))) LIMIT 13", fieldName, testTableName) + expectedQuery2 = fmt.Sprintf("SELECT DISTINCT `%s` FROM `%s` WHERE ((`epoch_time`>=fromUnixTimestamp(1709036700) AND `epoch_time`<=fromUnixTimestamp(1709037659)) AND (`epoch_time_datetime64`>=fromUnixTimestamp64Milli(1709036700000) AND `epoch_time_datetime64`<=fromUnixTimestamp64Milli(1709037659999))) LIMIT 13", fieldName, testTableName) + } // Once in a while `AND` conditions could be swapped, so we match both cases mock.ExpectQuery(fmt.Sprintf("%s|%s", 
regexp.QuoteMeta(expectedQuery1), regexp.QuoteMeta(expectedQuery2))). WillReturnRows(sqlmock.NewRows([]string{"client_name"}).AddRow("client_a").AddRow("client_b")) @@ -153,17 +159,17 @@ func testHandleTermsEnumRequest(t *testing.T, requestBody []byte, fieldName stri } func TestHandleTermsEnumRequest(t *testing.T) { - testHandleTermsEnumRequest(t, rawRequestBody, `"client_name"`) + testHandleTermsEnumRequest(t, rawRequestBody, "client_name", false) } // Basic test. // "client.name" should be replaced by "client_name", and results should stay the same func TestIfHandleTermsEnumUsesSchema(t *testing.T) { requestBodyWithAliasedField := bytes.ReplaceAll(rawRequestBody, []byte(`"field": "client_name"`), []byte(`"field": "client.name"`)) - testHandleTermsEnumRequest(t, requestBodyWithAliasedField, `"client_name"`) + testHandleTermsEnumRequest(t, requestBodyWithAliasedField, "client_name", false) } func TestIfHandleTermsEnumUsesSchemaForMapColumn(t *testing.T) { requestBodyWithAliasedField := bytes.ReplaceAll(rawRequestBody, []byte(`"field": "client_name"`), []byte(`"field": "map_name.key_name"`)) - testHandleTermsEnumRequest(t, requestBodyWithAliasedField, "arrayElement(\"map_name\",'key_name')") + testHandleTermsEnumRequest(t, requestBodyWithAliasedField, "arrayElement(`map_name`,'key_name')", true) } diff --git a/platform/go.sum b/platform/go.sum index a5cf61188..8df2e4395 100644 --- a/platform/go.sum +++ b/platform/go.sum @@ -1,38 +1,113 @@ +cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= +cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= +cloud.google.com/go/auth v0.9.4/go.mod h1:SHia8n6//Ya940F1rLimhJCjjx7KE17t0ctFEci3HkA= +cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= +cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.5.1/go.mod 
h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go/iam v1.2.1/go.mod h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g= +cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= +github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/ClickHouse/ch-go v0.65.1 h1:SLuxmLl5Mjj44/XbINsK2HFvzqup0s6rwKLFH347ZhU= github.com/ClickHouse/ch-go v0.65.1/go.mod h1:bsodgURwmrkvkBe5jw1qnGDgyITsYErfONKAHn05nv4= +github.com/ClickHouse/clickhouse-go v1.5.4/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= github.com/ClickHouse/clickhouse-go/v2 v2.32.2 h1:Y8fAXt0CpLhqNXMLlSddg+cMfAr7zHBWqXLpih6ozCY= github.com/ClickHouse/clickhouse-go/v2 v2.32.2/go.mod h1:/vE8N/+9pozLkIiTMWbNUGviccDv/czEGS1KACvpXIk= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= +github.com/DataDog/appsec-internal-go v1.7.0/go.mod h1:wW0cRfWBo4C044jHGwYiyh5moQV2x0AhnwqMuiX7O/g= +github.com/DataDog/datadog-agent/pkg/obfuscate v0.57.0/go.mod h1:Po5HwoDd4FmT/EqgrE9x7Zz4LjxtGBSIuNY1C1lppBQ= +github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.0/go.mod h1:4Vo3SJ24uzfKHUHLoFa8t8o+LH+7TCQ7sPcZDtOpSP4= +github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= 
+github.com/DataDog/go-libddwaf/v3 v3.4.0/go.mod h1:n98d9nZ1gzenRSk53wz8l6d34ikxS+hs62A31Fqmyi4= github.com/DataDog/go-sqllexer v0.1.1 h1:45wV74bIqAeEGUd3VyM78I7tfarjAY/XZzklJ+FQjmk= github.com/DataDog/go-sqllexer v0.1.1/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= +github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/H0llyW00dzZ/cidr v1.2.1 h1:DfRHX+RqVVKZijQGO1aJSaWvN9Saan8sycK/4wrfY5g= github.com/H0llyW00dzZ/cidr v1.2.1/go.mod h1:S+EgYkMandSAN27mGNG/CB3jeoXDAyalsvvVFpWdnXc= +github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Shopify/toxiproxy/v2 v2.9.0/go.mod h1:2uPRyxR46fsx2yUr9i8zcejzdkWfK7p6G23jV/X6YNs= +github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= +github.com/aquarapid/vaultlib v0.5.1/go.mod h1:yT7AlEXtuabkxylOc/+Ulyp18tff1+QjgNLTnFWTlOs= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw= +github.com/aws/aws-sdk-go-v2/config v1.27.31/go.mod 
h1:z04nZdSWFPaDwK3DdJOG2r+scLQzMYuJeW0CujEm9FM= +github.com/aws/aws-sdk-go-v2/credentials v1.17.30/go.mod h1:BPJ/yXV92ZVq6G8uYvbU0gSl8q94UB63nMT5ctNO38g= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12/go.mod h1:fuR57fAgMk7ot3WcNQfb6rSEn+SUffl7ri+aa8uKysI= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.15/go.mod h1:0QEmQSSWMVfiAk93l1/ayR9DQ9+jwni7gHS2NARZXB0= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16/go.mod h1:2DwJF39FlNAUiX5pAc0UNeiz16lK2t7IaFcm0LFHEgc= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16/go.mod h1:7ZfEPZxkW42Afq4uQB8H2E2e6ebh6mXTueEpYzjCzcs= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.16/go.mod h1:YHk6owoSwrIsok+cAH9PENCOGoH5PU2EllX4vLtSrsY= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.18/go.mod h1:Br6+bxfG33Dk3ynmkhsW2Z/t9D4+lRqdLDNCKi85w0U= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18/go.mod h1:++NHzT+nAF7ZPrHPsA+ENvsXkOO8wEu+C6RXltAG4/c= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16/go.mod h1:Uyk1zE1VVdsHSU7096h/rwnXDzOzYQVl+FNPhPw7ShY= +github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.5/go.mod h1:ZeDX1SnKsVlejeuz41GiajjZpRSWR7/42q/EyA/QEiM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5/go.mod h1:20sz31hv/WsPa3HhU3hfrIet2kxM4Pe0r20eBZ20Tac= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.5/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= +github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df h1:GSoSVRLoBaFpOOds6QyY1L8AX7uoY+Ln3BHc22W40X0= github.com/barkimedes/go-deepcopy 
v0.0.0-20220514131651-17c30cfc62df/go.mod h1:hiVxq5OP2bUGBRNS3Z/bt/reCLFNbdcST6gISi1fiOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bndr/gotabulate v1.1.2/go.mod h1:0+8yUgaPTtLRTjf49E8oju7ojpU11YmXyvq1LbPAb3U= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU= +github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/dave/jennifer v1.7.1/go.mod h1:nXbxhEmQfOZhWml3D1cDK5M1FLnMSozpbFN/m3RmGZc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dmarkham/enumer v1.5.10/go.mod h1:e4VILe2b1nYK3JKJpRmNdl5xbDQvELc6tQ8b+GsGk6E= +github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/ebitengine/purego v0.7.1/go.mod h1:ah1In8AOtksoNK6yk5z1HTJeUkC1Ez4Wk2idgGslMwQ= +github.com/envoyproxy/go-control-plane v0.12.1-0.20240621013728-1eb8caab5155/go.mod h1:5Wkq+JduFtdAXihLmeTJf+tRYIT4KBc2vPXDhwVo1pA= +github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw= github.com/go-faster/city v1.0.1/go.mod 
h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= github.com/go-faster/errors v0.7.1 h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AYg= github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= @@ -44,10 +119,14 @@ github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -55,25 +134,47 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= +github.com/google/safehtml v0.1.0/go.mod h1:L4KWwDsUJdECRAEpZoBn3O64bQaywRscowZjJAzjHnU= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/pat v0.0.0-20180118222023-199c85a7f6d1/go.mod h1:YeAe0gNeiNT5hoiZRI4yiOky6jVdNvfO2N6Kav/HmxY= github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= github.com/gorilla/sessions v1.4.0 h1:kpIYOp/oi6MG/p5PgxApU8srsSw9tuFbt46Lt7auzqQ= github.com/gorilla/sessions v1.4.0/go.mod h1:FLWm50oby91+hl7p/wRxDth9bWSuk0qVL2emc7lT5ik= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= 
+github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c= github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U= github.com/huandu/go-clone v1.7.2 h1:3+Aq0Ed8XK+zKkLjE2dfHg0XrpIfcohBE1K+c8Usxoo= github.com/huandu/go-clone 
v1.7.2/go.mod h1:ReGivhG6op3GYr+UY3lS6mxjKp7MIGTknuU5TbTVaXE= +github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/ip2location/ip2location-go/v9 v9.7.1 h1:eXu/DqS13QE0h1Yrc9oji+6/anLD9KDf6Ulf5GdIQs8= github.com/ip2location/ip2location-go/v9 v9.7.1/go.mod h1:MPLnsKxwQlvd2lBNcQCsLoyzJLDBFizuO67wXXdzoyI= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= @@ -84,6 +185,10 @@ github.com/jackc/pgx/v5 v5.7.2 h1:mLoDLV6sonKlvjIEsV56SkWNCnuNv531l94GaIzO+XI= github.com/jackc/pgx/v5 v5.7.2/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 h1:uC1QfSlInpQF+M0ao65imhwqKnz3Q2z/d8PWZRMQvDM= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= github.com/k0kubun/pp v3.0.1+incompatible h1:3tqvf7QgUnZ5tXO6pNAZlrvHgl6DvifjDrd9g2S9Z40= @@ -94,6 +199,7 @@ github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCy github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod 
h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= github.com/knadh/koanf/parsers/json v0.1.0 h1:dzSZl5pf5bBcW0Acnu20Djleto19T0CfHcvZ14NJ6fU= @@ -111,33 +217,77 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/krishicks/yaml-patch v0.0.10/go.mod h1:Sm5TchwZS6sm7RJoyg87tzxm2ZcKzdRE4Q7TjNhPrME= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y= +github.com/lestrrat-go/blackmagic v1.0.2/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU= +github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= +github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4= +github.com/lestrrat-go/jwx v1.2.29/go.mod h1:hU8k2l6WF0ncx20uQdOmik/Gjg6E3/wIRtXSNFeZuB8= +github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 h1:7UMa6KCCMjZEMDtTVdcGu0B1GmmC7QJKiCCjyTAWQy0= github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/markbates/going v1.0.0/go.mod h1:I6mnB4BPnEeqo85ynXIx1ZFLLbtiLHNXVgWeFO9OGOA= 
github.com/markbates/goth v1.80.0 h1:NnvatczZDzOs1hn9Ug+dVYf2Viwwkp/ZDX5K+GLjan8= github.com/markbates/goth v1.80.0/go.mod h1:4/GYHo+W6NWisrMPZnq0Yr2Q70UntNLn7KXEFhrIdAY= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-ieproxy v0.0.12/go.mod h1:Vn+N61199DAnVeTgaF8eoB9PvLO8P3OBnG95ENh7B7c= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/minio/minio-go v0.0.0-20190131015406-c8a261de75c1/go.mod h1:vuvdOZLJuf5HmJAJrKV64MmozrSsk+or0PB5dzdfspg= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mkevac/debugcharts v0.0.0-20191222103121-ae1c48aa8615/go.mod h1:Ad7oeElCZqA1Ufj0U9/liOF4BtVepxRcTvr2ey7zTvM= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= 
+github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mrjones/oauth v0.0.0-20180629183705-f4e24b6d100c/go.mod h1:skjdDftzkFALcuGzYSklqYd8gvat6F1gZJ4YPVbkZpM= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0/go.mod 
h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opentracing-contrib/go-grpc v0.0.0-20240724223109-9dec25a38fa8/go.mod h1:z1k3YVSdAPSXtMUPS1TBWG5DaNWlT+VCbB0Qm3QJe74= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/outcaste-io/ristretto v0.2.3/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac= +github.com/pascaldekloe/name v1.0.1/go.mod h1:Z//MfYJnH4jVpQ9wkclwu2I2MkHmXTlT9wR5UZScttM= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/paulmach/orb v0.11.1 h1:3koVegMC4X/WeiXYz9iswopaTwMem53NzTJuTF20JzU= github.com/paulmach/orb v0.11.1/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= +github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= +github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pires/go-proxyproto v0.7.0/go.mod h1:Vz/1JPY/OACxWGQNIRY2BeyDmpoaWmEP40O9LbuiFR4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a/go.mod h1:GJFUzQuXIoB2Kjn1ZfDhJr/42D5nWOqRcIQVgCxTuIE= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -153,13 +303,20 @@ github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= @@ 
-168,17 +325,30 @@ github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sjmudd/stopwatch v0.1.1/go.mod h1:BLw0oIQJ1YLXBO/q9ufK/SgnKBVIkC2qrm6uy78Zw6U= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod 
h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tailscale/hujson v0.0.0-20241010212012-29efb4a0184b h1:MNaGusDfB1qxEsl6iVb33Gbe777IKzPP5PDta0xGC8M= github.com/tailscale/hujson v0.0.0-20241010212012-29efb4a0184b/go.mod h1:EbW0wDK/qEUYI0A5bqq0C2kF8JTQwWONmGDBbzsxxHo= +github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/testcontainers/testcontainers-go v0.33.0/go.mod h1:W80YpTa8D5C3Yy16icheD01UTDu+LmXIA2Keo+jWtT8= github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= @@ -190,29 +360,50 @@ github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/tinylib/msgp v1.2.1/go.mod h1:2vIGs3lcUo8izAATNobrCHevYZC/LMsJtw4JPiYPHro= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo= github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI= +github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb h1:Ywfo8sUltxogBpFuMOFRrrSifO788kAFxmvVw31PtQQ= github.com/ucarion/urlpath 
v0.0.0-20200424170820-7ccc79b76bbb/go.mod h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/z-division/go-zookeeper v1.0.0/go.mod h1:6X4UioQXpvyezJJl4J9NHAJKsoffCwy5wCaaTktXjOA= +go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28= +go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E= +go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50= go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= 
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0/go.mod h1:LqaApwGx/oUmzsbqxkzuBvyoPpkxk3JQWnqfVrJ3wCA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.2.0/go.mod h1:J0y0rp9L3xiff1+ZBfKxlC1fz2+aO16tw0tsDOixfuM= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -223,6 +414,7 @@ golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0 golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/mod v0.2.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -253,6 +445,7 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -265,27 +458,49 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU= +google.golang.org/api v0.197.0/go.mod h1:AuOuo20GoQ331nq7DquGHlU6d+2wN2fZ8O0ta60nRNw= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= +google.golang.org/grpc/examples v0.0.0-20210430044426-28078834f35b/go.mod h1:Ly7ZA/ARzg8fnPU9TyZIxoz33sEUuWX7txiqs8lPTgE= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/DataDog/dd-trace-go.v1 v1.67.1/go.mod h1:6DdiJPKOeJfZyd/IUGCAd5elY8qPGkztK6wbYYsMjag= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod 
h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ldap.v2 v2.5.1/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI= lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/gc/v3 v3.0.0-20240801135723-a856999a2e4a/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4= +modernc.org/libc v1.60.1/go.mod h1:xJuobKuNxKH3RUatS7GjR+suWj+5c2K7bi4m/S5arOY= +modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= +modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU= +modernc.org/sqlite v1.33.1/go.mod h1:pXV2xHxhzXZsgT/RtTFAPY6JJDEvOTcTdwADQCCWD4k= +modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= vitess.io/vitess v0.21.2 h1:SbMdGngyhYurvh2KTZ92VkR5DH5taib+HH4xTkftUWU= vitess.io/vitess 
v0.21.2/go.mod h1:n37n5rmIBHYWnoPZod9umrtExlUR/9SbR3VGmanYNMU= diff --git a/platform/model/expr.go b/platform/model/expr.go index 0a1ab86a2..a682b9dff 100644 --- a/platform/model/expr.go +++ b/platform/model/expr.go @@ -4,8 +4,8 @@ package model import ( "fmt" + "github.com/QuesmaOrg/quesma/platform/util" "github.com/k0kubun/pp" - "strconv" ) // Expr is a generic representation of an expression which is a part of the SQL query. @@ -301,7 +301,7 @@ func NewAliasedExpr(expr Expr, alias string) AliasedExpr { func (a AliasedExpr) Accept(v ExprVisitor) interface{} { return v.VisitAliasedExpr(a) } func (a AliasedExpr) AliasRef() LiteralExpr { - return LiteralExpr{Value: strconv.Quote(a.Alias)} + return LiteralExpr{Value: util.BackquoteIdentifier(a.Alias)} } // WindowFunction representation e.g. `SUM(x) OVER (PARTITION BY y ORDER BY z)` diff --git a/platform/model/expr_string_renderer.go b/platform/model/expr_string_renderer.go index 99100a1a6..9968ea36c 100644 --- a/platform/model/expr_string_renderer.go +++ b/platform/model/expr_string_renderer.go @@ -7,14 +7,10 @@ import ( "github.com/QuesmaOrg/quesma/platform/logger" "github.com/QuesmaOrg/quesma/platform/types" "github.com/QuesmaOrg/quesma/platform/util" - "regexp" "sort" - "strconv" "strings" ) -var identifierRegexp = regexp.MustCompile(`^([a-zA-Z_][a-zA-Z0-9_]*|".*")$`) - type renderer struct{} // AsString renders the given expression to string which can be used to build SQL query @@ -32,9 +28,9 @@ func (v *renderer) VisitColumnRef(e ColumnRef) interface{} { name = strings.TrimSuffix(name, types.MultifieldMapKeysSuffix) name = strings.TrimSuffix(name, types.MultifieldMapValuesSuffix) if len(e.TableAlias) > 0 { - return fmt.Sprintf("%s.%s", strconv.Quote(e.TableAlias), strconv.Quote(name)) + return fmt.Sprintf("%s.%s", util.BackquoteIdentifier(e.TableAlias), util.BackquoteIdentifier(name)) } else { - return strconv.Quote(name) + return util.BackquoteIdentifier(name) } } @@ -153,24 +149,17 @@ func (v *renderer) 
VisitTableRef(e TableRef) interface{} { var result []string if e.DatabaseName != "" { - if identifierRegexp.MatchString(e.DatabaseName) { - result = append(result, e.DatabaseName) - } else { - result = append(result, strconv.Quote(e.DatabaseName)) - } + result = append(result, util.BackquoteIdentifier(e.DatabaseName)) } - if identifierRegexp.MatchString(e.Name) { - result = append(result, e.Name) - } else { - result = append(result, strconv.Quote(e.Name)) - } + // append table name + result = append(result, util.BackquoteIdentifier(e.Name)) return strings.Join(result, ".") } func (v *renderer) VisitAliasedExpr(e AliasedExpr) interface{} { - return fmt.Sprintf("%s AS %s", e.Expr.Accept(v).(string), strconv.Quote(e.Alias)) + return fmt.Sprintf("%s AS %s", e.Expr.Accept(v).(string), util.BackquoteIdentifier(e.Alias)) } func (v *renderer) VisitSelectCommand(c SelectCommand) interface{} { diff --git a/platform/optimize/pipeline_test.go b/platform/optimize/pipeline_test.go index 07db87394..f8eb60f8c 100644 --- a/platform/optimize/pipeline_test.go +++ b/platform/optimize/pipeline_test.go @@ -432,7 +432,7 @@ func Test_materialized_view_replace(t *testing.T) { "materialized_view_replace": { Properties: map[string]string{ "table": "foo", - "condition": `"a">10`, + "condition": "`a`>10", "view": "foo_view", }, }, diff --git a/platform/parsers/elastic_query_dsl/lucene/lucene_parser_test.go b/platform/parsers/elastic_query_dsl/lucene/lucene_parser_test.go index 9e7cdea6a..74805d9bb 100644 --- a/platform/parsers/elastic_query_dsl/lucene/lucene_parser_test.go +++ b/platform/parsers/elastic_query_dsl/lucene/lucene_parser_test.go @@ -17,72 +17,71 @@ func TestTranslatingLuceneQueriesToSQL(t *testing.T) { query string want string }{ - {`title:"The Right Way" AND text:go!!`, `("title" __quesma_match 'The Right Way' AND "text" __quesma_match 'go!!')`}, - {`title:Do it right AND right`, `((("title" __quesma_match 'Do' OR ("title" __quesma_match 'it' OR "text" __quesma_match 'it')) OR 
("title" __quesma_match 'right' OR "text" __quesma_match 'right')) AND ("title" __quesma_match 'right' OR "text" __quesma_match 'right'))`}, - {`roam~`, `("title" __quesma_match 'roam' OR "text" __quesma_match 'roam')`}, - {`roam~0.8`, `("title" __quesma_match 'roam' OR "text" __quesma_match 'roam')`}, - {`jakarta^4 apache`, `(("title" __quesma_match 'jakarta' OR "text" __quesma_match 'jakarta') OR ("title" __quesma_match 'apache' OR "text" __quesma_match 'apache'))`}, - {`"jakarta apache"^10`, `("title" __quesma_match 'jakarta apache' OR "text" __quesma_match 'jakarta apache')`}, - {`"jakarta apache"~10`, `("title" __quesma_match 'jakarta apache' OR "text" __quesma_match 'jakarta apache')`}, - {`mod_date:[2002-01-01 TO 2003-02-15]`, `("mod_date" >= '2002-01-01' AND "mod_date" <= '2003-02-15')`}, // 7 - {`mod_date:[2002-01-01 TO 2003-02-15}`, `("mod_date" >= '2002-01-01' AND "mod_date" < '2003-02-15')`}, - {`age:>10`, `"age" > '10'`}, - {`age:>=10`, `"age" >= '10'`}, - {`age:<10`, `"age" < '10'`}, - {`age:<=10.2`, `"age" <= '10.2'`}, - {`age:10.2`, `"age" = 10.2`}, - {`age:-10.2`, `"age" = -10.2`}, - {`age:<-10.2`, `"age" < '-10.2'`}, - {`age: 10.2`, `"age" = 10.2`}, - {`age: <-10.2`, `"age" < '-10.2'`}, - {`age: < -10.2`, `"age" < '-10.2'`}, - {`age:10.2 age2:[12 TO 15] age3:{11 TO *}`, `(("age" = 10.2 OR ("age2" >= '12' AND "age2" <= '15')) OR "age3" > '11')`}, - {`date:{* TO 2012-01-01} another`, `("date" < '2012-01-01' OR ("title" __quesma_match 'another' OR "text" __quesma_match 'another'))`}, - {`date:{2012-01-15 TO *} another`, `("date" > '2012-01-15' OR ("title" __quesma_match 'another' OR "text" __quesma_match 'another'))`}, - {`date:{* TO *}`, `"date" IS NOT NULL`}, - {`title:{Aida TO Carmen]`, `("title" > 'Aida' AND "title" <= 'Carmen')`}, - {`count:[1 TO 5]`, `("count" >= '1' AND "count" <= '5')`}, // 17 - {`"jakarta apache" AND "Apache Lucene"`, `(("title" __quesma_match 'jakarta apache' OR "text" __quesma_match 'jakarta apache') AND ("title" 
__quesma_match 'Apache Lucene' OR "text" __quesma_match 'Apache Lucene'))`}, - {`NOT status:"jakarta apache"`, `NOT ("status" __quesma_match 'jakarta apache')`}, - {`"jakarta apache" NOT "Apache Lucene"`, `(("title" __quesma_match 'jakarta apache' OR "text" __quesma_match 'jakarta apache') AND NOT (("title" __quesma_match 'Apache Lucene' OR "text" __quesma_match 'Apache Lucene')))`}, - {`(jakarta OR apache) AND website`, `(((("title" __quesma_match 'jakarta' OR "text" __quesma_match 'jakarta')) OR ("title" __quesma_match 'apache' OR "text" __quesma_match 'apache')) AND ("title" __quesma_match 'website' OR "text" __quesma_match 'website'))`}, - {`title:(return "pink panther")`, `("title" __quesma_match 'return' OR "title" __quesma_match 'pink panther')`}, - {`status:(active OR pending) title:(full text search)^2`, `(("status" __quesma_match 'active' OR "status" __quesma_match 'pending') OR (("title" __quesma_match 'full' OR "title" __quesma_match 'text') OR "title" __quesma_match 'search'))`}, - {`status:(active OR NOT (pending AND in-progress)) title:(full text search)^2`, `(("status" __quesma_match 'active' OR NOT (("status" __quesma_match 'pending' AND "status" __quesma_match 'in-progress'))) OR (("title" __quesma_match 'full' OR "title" __quesma_match 'text') OR "title" __quesma_match 'search'))`}, - {`status:(NOT active OR NOT (pending AND in-progress)) title:(full text search)^2`, `((NOT ("status" __quesma_match 'active') OR NOT (("status" __quesma_match 'pending' AND "status" __quesma_match 'in-progress'))) OR (("title" __quesma_match 'full' OR "title" __quesma_match 'text') OR "title" __quesma_match 'search'))`}, - {`status:(active OR (pending AND in-progress)) title:(full text search)^2`, `(("status" __quesma_match 'active' OR ("status" __quesma_match 'pending' AND "status" __quesma_match 'in-progress')) OR (("title" __quesma_match 'full' OR "title" __quesma_match 'text') OR "title" __quesma_match 'search'))`}, - {`status:((a OR (b AND c)) AND d)`, 
`(("status" __quesma_match 'a' OR ("status" __quesma_match 'b' AND "status" __quesma_match 'c')) AND "status" __quesma_match 'd')`}, - {`title:(return [Aida TO Carmen])`, `("title" __quesma_match 'return' OR ("title" >= 'Aida' AND "title" <= 'Carmen'))`}, - {`host.name:(NOT active OR NOT (pending OR in-progress)) (full text search)^2`, `((((NOT ("host.name" __quesma_match 'active') OR NOT (("host.name" __quesma_match 'pending' OR "host.name" __quesma_match 'in-progress'))) OR (("title" __quesma_match 'full' OR "text" __quesma_match 'full'))) OR ("title" __quesma_match 'text' OR "text" __quesma_match 'text')) OR ("title" __quesma_match 'search' OR "text" __quesma_match 'search'))`}, - {`host.name:(active AND NOT (pending OR in-progress)) hermes nemesis^2`, `((("host.name" __quesma_match 'active' AND NOT (("host.name" __quesma_match 'pending' OR "host.name" __quesma_match 'in-progress'))) OR ("title" __quesma_match 'hermes' OR "text" __quesma_match 'hermes')) OR ("title" __quesma_match 'nemesis' OR "text" __quesma_match 'nemesis'))`}, - + {`title:"The Right Way" AND text:go!!`, "(`title` __quesma_match 'The Right Way' AND `text` __quesma_match 'go!!')"}, + {`title:Do it right AND right`, "(((`title` __quesma_match 'Do' OR (`title` __quesma_match 'it' OR `text` __quesma_match 'it')) OR (`title` __quesma_match 'right' OR `text` __quesma_match 'right')) AND (`title` __quesma_match 'right' OR `text` __quesma_match 'right'))"}, + {`roam~`, "(`title` __quesma_match 'roam' OR `text` __quesma_match 'roam')"}, + {`roam~0.8`, "(`title` __quesma_match 'roam' OR `text` __quesma_match 'roam')"}, + {`jakarta^4 apache`, "((`title` __quesma_match 'jakarta' OR `text` __quesma_match 'jakarta') OR (`title` __quesma_match 'apache' OR `text` __quesma_match 'apache'))"}, + {`"jakarta apache"^10`, "(`title` __quesma_match 'jakarta apache' OR `text` __quesma_match 'jakarta apache')"}, + {`"jakarta apache"~10`, "(`title` __quesma_match 'jakarta apache' OR `text` __quesma_match 'jakarta 
apache')"}, + {`mod_date:[2002-01-01 TO 2003-02-15]`, "(`mod_date` >= '2002-01-01' AND `mod_date` <= '2003-02-15')"}, // 7 + {`mod_date:[2002-01-01 TO 2003-02-15}`, "(`mod_date` >= '2002-01-01' AND `mod_date` < '2003-02-15')"}, + {`age:>10`, "`age` > '10'"}, + {`age:>=10`, "`age` >= '10'"}, + {`age:<10`, "`age` < '10'"}, + {`age:<=10.2`, "`age` <= '10.2'"}, + {`age:10.2`, "`age` = 10.2"}, + {`age:-10.2`, "`age` = -10.2"}, + {`age:<-10.2`, "`age` < '-10.2'"}, + {`age: 10.2`, "`age` = 10.2"}, + {`age: <-10.2`, "`age` < '-10.2'"}, + {`age: < -10.2`, "`age` < '-10.2'"}, + {`age:10.2 age2:[12 TO 15] age3:{11 TO *}`, "((`age` = 10.2 OR (`age2` >= '12' AND `age2` <= '15')) OR `age3` > '11')"}, + {`date:{* TO 2012-01-01} another`, "(`date` < '2012-01-01' OR (`title` __quesma_match 'another' OR `text` __quesma_match 'another'))"}, + {`date:{2012-01-15 TO *} another`, "(`date` > '2012-01-15' OR (`title` __quesma_match 'another' OR `text` __quesma_match 'another'))"}, + {`date:{* TO *}`, "`date` IS NOT NULL"}, + {`title:{Aida TO Carmen]`, "(`title` > 'Aida' AND `title` <= 'Carmen')"}, + {`count:[1 TO 5]`, "(`count` >= '1' AND `count` <= '5')"}, // 17 + {`"jakarta apache" AND "Apache Lucene"`, "((`title` __quesma_match 'jakarta apache' OR `text` __quesma_match 'jakarta apache') AND (`title` __quesma_match 'Apache Lucene' OR `text` __quesma_match 'Apache Lucene'))"}, + {`NOT status:"jakarta apache"`, "NOT (`status` __quesma_match 'jakarta apache')"}, + {`"jakarta apache" NOT "Apache Lucene"`, "((`title` __quesma_match 'jakarta apache' OR `text` __quesma_match 'jakarta apache') AND NOT ((`title` __quesma_match 'Apache Lucene' OR `text` __quesma_match 'Apache Lucene')))"}, + {`(jakarta OR apache) AND website`, "((((`title` __quesma_match 'jakarta' OR `text` __quesma_match 'jakarta')) OR (`title` __quesma_match 'apache' OR `text` __quesma_match 'apache')) AND (`title` __quesma_match 'website' OR `text` __quesma_match 'website'))"}, + {`title:(return "pink panther")`, "(`title` 
__quesma_match 'return' OR `title` __quesma_match 'pink panther')"}, + {`status:(active OR pending) title:(full text search)^2`, "((`status` __quesma_match 'active' OR `status` __quesma_match 'pending') OR ((`title` __quesma_match 'full' OR `title` __quesma_match 'text') OR `title` __quesma_match 'search'))"}, + {`status:(active OR NOT (pending AND in-progress)) title:(full text search)^2`, "((`status` __quesma_match 'active' OR NOT ((`status` __quesma_match 'pending' AND `status` __quesma_match 'in-progress'))) OR ((`title` __quesma_match 'full' OR `title` __quesma_match 'text') OR `title` __quesma_match 'search'))"}, + {`status:(NOT active OR NOT (pending AND in-progress)) title:(full text search)^2`, "((NOT (`status` __quesma_match 'active') OR NOT ((`status` __quesma_match 'pending' AND `status` __quesma_match 'in-progress'))) OR ((`title` __quesma_match 'full' OR `title` __quesma_match 'text') OR `title` __quesma_match 'search'))"}, + {`status:(active OR (pending AND in-progress)) title:(full text search)^2`, "((`status` __quesma_match 'active' OR (`status` __quesma_match 'pending' AND `status` __quesma_match 'in-progress')) OR ((`title` __quesma_match 'full' OR `title` __quesma_match 'text') OR `title` __quesma_match 'search'))"}, + {`status:((a OR (b AND c)) AND d)`, "((`status` __quesma_match 'a' OR (`status` __quesma_match 'b' AND `status` __quesma_match 'c')) AND `status` __quesma_match 'd')"}, + {`title:(return [Aida TO Carmen])`, "(`title` __quesma_match 'return' OR (`title` >= 'Aida' AND `title` <= 'Carmen'))"}, + {`host.name:(NOT active OR NOT (pending OR in-progress)) (full text search)^2`, "((((NOT (`host.name` __quesma_match 'active') OR NOT ((`host.name` __quesma_match 'pending' OR `host.name` __quesma_match 'in-progress'))) OR ((`title` __quesma_match 'full' OR `text` __quesma_match 'full'))) OR (`title` __quesma_match 'text' OR `text` __quesma_match 'text')) OR (`title` __quesma_match 'search' OR `text` __quesma_match 'search'))"}, + 
{`host.name:(active AND NOT (pending OR in-progress)) hermes nemesis^2`, "(((`host.name` __quesma_match 'active' AND NOT ((`host.name` __quesma_match 'pending' OR `host.name` __quesma_match 'in-progress'))) OR (`title` __quesma_match 'hermes' OR `text` __quesma_match 'hermes')) OR (`title` __quesma_match 'nemesis' OR `text` __quesma_match 'nemesis'))"}, // special characters - {`dajhd \(%&RY#WFDG`, `(("title" __quesma_match 'dajhd' OR "text" __quesma_match 'dajhd') OR ("title" __quesma_match '(\%&RY#WFDG' OR "text" __quesma_match '(\%&RY#WFDG'))`}, - {`x:aaa'bbb`, `"x" __quesma_match 'aaa\'bbb'`}, - {`x:aaa\bbb`, `"x" __quesma_match 'aaa\\bbb'`}, - {`x:aaa*bbb`, `"x" __quesma_match 'aaa%bbb'`}, - {`x:aaa_bbb`, `"x" __quesma_match 'aaa\_bbb'`}, - {`x:aaa%bbb`, `"x" __quesma_match 'aaa\%bbb'`}, - {`x:aaa%\*_bbb`, `"x" __quesma_match 'aaa\%*\_bbb'`}, + {`dajhd \(%&RY#WFDG`, "((`title` __quesma_match 'dajhd' OR `text` __quesma_match 'dajhd') OR (`title` __quesma_match '(\\%&RY#WFDG' OR `text` __quesma_match '(\\%&RY#WFDG'))"}, + {`x:aaa'bbb`, "`x` __quesma_match 'aaa\\'bbb'"}, + {`x:aaa\bbb`, "`x` __quesma_match 'aaa\\\\bbb'"}, + {`x:aaa*bbb`, "`x` __quesma_match 'aaa%bbb'"}, + {`x:aaa_bbb`, "`x` __quesma_match 'aaa\\_bbb'"}, + {`x:aaa%bbb`, "`x` __quesma_match 'aaa\\%bbb'"}, + {`x:aaa%\*_bbb`, "`x` __quesma_match 'aaa\\%*\\_bbb'"}, // tests for wildcards - {"%", `("title" __quesma_match '\%' OR "text" __quesma_match '\%')`}, - {`*`, `("title" __quesma_match '%' OR "text" __quesma_match '%')`}, - {`*neme*`, `("title" __quesma_match '%neme%' OR "text" __quesma_match '%neme%')`}, - {`*nem?* abc:ne*`, `(("title" __quesma_match '%nem_%' OR "text" __quesma_match '%nem_%') OR "abc" __quesma_match 'ne%')`}, - {`title:(NOT a* AND NOT (b* OR *))`, `(NOT ("title" __quesma_match 'a%') AND NOT (("title" __quesma_match 'b%' OR "title" __quesma_match '%')))`}, - {`title:abc\*`, `"title" __quesma_match 'abc*'`}, - {`title:abc*\*`, `"title" __quesma_match 'abc%*'`}, - {`ab\+c`, 
`("title" __quesma_match 'ab+c' OR "text" __quesma_match 'ab+c')`}, - {`!db.str:FAIL`, `NOT ("db.str" __quesma_match 'FAIL')`}, - {`_exists_:title`, `"title" IS NOT NULL`}, - {`!_exists_:title`, `NOT ("title" IS NOT NULL)`}, - {"db.str:*weaver%12*", `"db.str" __quesma_match '%weaver\%12%'`}, - {"(db.str:*weaver*)", `("db.str" __quesma_match '%weaver%')`}, - {"(a.type:*ab* OR a.type:*Ab*)", `(("a.type" __quesma_match '%ab%') OR "a.type" __quesma_match '%Ab%')`}, - {"log: \"lalala lala la\" AND log: \"troll\"", `("log" __quesma_match 'lalala lala la' AND "log" __quesma_match 'troll')`}, - {"int: 20", `"int" = 20`}, - {`int: "20"`, `"int" __quesma_match '20'`}, + {"%", "(`title` __quesma_match '\\%' OR `text` __quesma_match '\\%')"}, + {`*`, "(`title` __quesma_match '%' OR `text` __quesma_match '%')"}, + {`*neme*`, "(`title` __quesma_match '%neme%' OR `text` __quesma_match '%neme%')"}, + {`*nem?* abc:ne*`, "((`title` __quesma_match '%nem_%' OR `text` __quesma_match '%nem_%') OR `abc` __quesma_match 'ne%')"}, + {`title:(NOT a* AND NOT (b* OR *))`, "(NOT (`title` __quesma_match 'a%') AND NOT ((`title` __quesma_match 'b%' OR `title` __quesma_match '%')))"}, + {`title:abc\*`, "`title` __quesma_match 'abc*'"}, + {`title:abc*\*`, "`title` __quesma_match 'abc%*'"}, + {`ab\+c`, "(`title` __quesma_match 'ab+c' OR `text` __quesma_match 'ab+c')"}, + {`!db.str:FAIL`, "NOT (`db.str` __quesma_match 'FAIL')"}, + {`_exists_:title`, "`title` IS NOT NULL"}, + {`!_exists_:title`, "NOT (`title` IS NOT NULL)"}, + {"db.str:*weaver%12*", "`db.str` __quesma_match '%weaver\\%12%'"}, + {"(db.str:*weaver*)", "(`db.str` __quesma_match '%weaver%')"}, + {"(a.type:*ab* OR a.type:*Ab*)", "((`a.type` __quesma_match '%ab%') OR `a.type` __quesma_match '%Ab%')"}, + {"log: \"lalala lala la\" AND log: \"troll\"", "(`log` __quesma_match 'lalala lala la' AND `log` __quesma_match 'troll')"}, + {"int: 20", "`int` = 20"}, + {`int: "20"`, "`int` __quesma_match '20'"}, } var 
randomQueriesWithPossiblyIncorrectInput = []struct { query string @@ -90,18 +89,18 @@ func TestTranslatingLuceneQueriesToSQL(t *testing.T) { }{ {``, `true`}, {` `, `true`}, - {` 2 `, `("title" = 2 OR "text" = 2)`}, - {` 2df$ ! `, `(("title" __quesma_match '2df$' OR "text" __quesma_match '2df$') AND NOT (false))`}, // TODO: this should probably just be "false" + {` 2 `, "(`title` = 2 OR `text` = 2)"}, + {` 2df$ ! `, "((`title` __quesma_match '2df$' OR `text` __quesma_match '2df$') AND NOT (false))"}, // TODO: this should probably just be "false" {`title:`, `false`}, - {`title: abc`, `"title" __quesma_match 'abc'`}, - {`title[`, `("title" __quesma_match 'title[' OR "text" __quesma_match 'title[')`}, - {`title[]`, `("title" __quesma_match 'title[]' OR "text" __quesma_match 'title[]')`}, - {`title[ TO ]`, `((("title" __quesma_match 'title[' OR "text" __quesma_match 'title[') OR ("title" __quesma_match 'TO' OR "text" __quesma_match 'TO')) OR ("title" __quesma_match ']' OR "text" __quesma_match ']'))`}, - {`title:[ TO 2]`, `("title" >= '' AND "title" <= '2')`}, - {` title `, `("title" __quesma_match 'title' OR "text" __quesma_match 'title')`}, - {` title : (+a -b c)`, `(("title" __quesma_match '+a' OR "title" __quesma_match '-b') OR "title" __quesma_match 'c')`}, // we don't support '+', '-' operators, but in that case the answer seems good enough + nothing crashes + {`title: abc`, "`title` __quesma_match 'abc'"}, + {`title[`, "(`title` __quesma_match 'title[' OR `text` __quesma_match 'title[')"}, + {`title[]`, "(`title` __quesma_match 'title[]' OR `text` __quesma_match 'title[]')"}, + {`title[ TO ]`, "(((`title` __quesma_match 'title[' OR `text` __quesma_match 'title[') OR (`title` __quesma_match 'TO' OR `text` __quesma_match 'TO')) OR (`title` __quesma_match ']' OR `text` __quesma_match ']'))"}, + {`title:[ TO 2]`, "(`title` >= '' AND `title` <= '2')"}, + {` title `, "(`title` __quesma_match 'title' OR `text` __quesma_match 'title')"}, + {` title : (+a -b c)`, 
"((`title` __quesma_match '+a' OR `title` __quesma_match '-b') OR `title` __quesma_match 'c')"}, // we don't support '+', '-' operators, but in that case the answer seems good enough + nothing crashes {`title:()`, `false`}, - {`() a`, `((false) OR ("title" __quesma_match 'a' OR "text" __quesma_match 'a'))`}, // a bit weird, but '(false)' is OK as I think nothing should match '()' + {`() a`, "((false) OR (`title` __quesma_match 'a' OR `text` __quesma_match 'a'))"}, // a bit weird, but '(false)' is OK as I think nothing should match '()' } currentSchema := schema.Schema{ @@ -127,8 +126,8 @@ func TestResolvePropertyNamesWhenTranslatingToSQL(t *testing.T) { mapping map[string]string want string }{ - {query: `title:"The Right Way" AND text:go!!`, mapping: map[string]string{}, want: `("title" __quesma_match 'The Right Way' AND "text" __quesma_match 'go!!')`}, - {query: `age:>10`, mapping: map[string]string{"age": "foo"}, want: `"foo" > '10'`}, + {query: `title:"The Right Way" AND text:go!!`, mapping: map[string]string{}, want: "(`title` __quesma_match 'The Right Way' AND `text` __quesma_match 'go!!')"}, + {query: `age:>10`, mapping: map[string]string{"age": "foo"}, want: "`foo` > '10'"}, } for i, tt := range properQueries { t.Run(util.PrettyTestName(tt.query, i), func(t *testing.T) { diff --git a/platform/parsers/elastic_query_dsl/pancake_sql_query_generation_test.go b/platform/parsers/elastic_query_dsl/pancake_sql_query_generation_test.go index c4de202d0..93c596462 100644 --- a/platform/parsers/elastic_query_dsl/pancake_sql_query_generation_test.go +++ b/platform/parsers/elastic_query_dsl/pancake_sql_query_generation_test.go @@ -246,13 +246,7 @@ func TestPancakeQueryGeneration_halfpancake(t *testing.T) { } `, - sql: ` -SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "host.name" AS "aggr__0__key_0", count(*) AS "aggr__0__count" -FROM ` + TableName + ` -GROUP BY "host.name" AS "aggr__0__key_0" -ORDER BY "aggr__0__count" DESC, "aggr__0__key_0" ASC -LIMIT 4`, // 
-- we added one more as filtering nulls happens during rendering + sql: "SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n `host.name` AS `aggr__0__key_0`, count(*) AS `aggr__0__count`\nFROM `__quesma_table_name`\nGROUP BY `host.name` AS `aggr__0__key_0`\nORDER BY `aggr__0__count` DESC, `aggr__0__key_0` ASC\nLIMIT 4", // -- we added one more as filtering nulls happens during rendering }, {"test2", @@ -275,14 +269,7 @@ LIMIT 4`, // -- we added one more as filtering nulls happens during rendering } } `, - ` -SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "host.name" AS "aggr__0__key_0", count(*) AS "aggr__0__count", - avgOrNull("bytes_gauge") AS "metric__0__2_col_0" -FROM ` + TableName + ` -GROUP BY "host.name" AS "aggr__0__key_0" -ORDER BY "aggr__0__count" DESC, "aggr__0__key_0" ASC -LIMIT 4`, // we increased limit by 1 to allow filtering of nulls druing json rendering + "SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n `host.name` AS `aggr__0__key_0`, count(*) AS `aggr__0__count`,\n avgOrNull(`bytes_gauge`) AS `metric__0__2_col_0`\nFROM `__quesma_table_name`\nGROUP BY `host.name` AS `aggr__0__key_0`\nORDER BY `aggr__0__count` DESC, `aggr__0__key_0` ASC\nLIMIT 4", // we increased limit by 1 to allow filtering of nulls druing json rendering }, } diff --git a/platform/parsers/elastic_query_dsl/pancake_top_hits.go b/platform/parsers/elastic_query_dsl/pancake_top_hits.go index 4414daa41..d4f0e49d8 100644 --- a/platform/parsers/elastic_query_dsl/pancake_top_hits.go +++ b/platform/parsers/elastic_query_dsl/pancake_top_hits.go @@ -6,11 +6,12 @@ import ( "fmt" "github.com/QuesmaOrg/quesma/platform/model" "github.com/QuesmaOrg/quesma/platform/model/metrics_aggregations" + "github.com/QuesmaOrg/quesma/platform/util" "strconv" ) func (p *pancakeSqlQueryGenerator) quotedLiteral(name string) model.LiteralExpr { - return model.NewLiteral(strconv.Quote(name)) + return model.NewLiteral(util.BackquoteIdentifier(name)) } // generateSimpleTopHitsQuery 
generates an SQL for top_hits/top_metrics @@ -83,7 +84,7 @@ func (p *pancakeSqlQueryGenerator) generateTopHitsQuery(aggregation *pancakeMode hitTableName := "hit_table" groupTableLiteral := func(reference string) model.Expr { - return model.NewLiteral(strconv.Quote(groupTableName) + "." + strconv.Quote(reference)) + return model.NewLiteral(util.BackquoteIdentifier(groupTableName) + "." + util.BackquoteIdentifier(reference)) } convertColumnRefToHitTable := func(expr model.Expr) model.Expr { diff --git a/platform/parsers/elastic_query_dsl/query_parser_range_test.go b/platform/parsers/elastic_query_dsl/query_parser_range_test.go index 95262f592..d2e06bdd1 100644 --- a/platform/parsers/elastic_query_dsl/query_parser_range_test.go +++ b/platform/parsers/elastic_query_dsl/query_parser_range_test.go @@ -36,7 +36,7 @@ var parseRangeTests = []parseRangeTest{ }, Config: database_common.NewNoTimestampOnlyStringAttrCHConfig(), }, - `("timestamp">=fromUnixTimestamp64Milli(1706881636029) AND "timestamp"<=fromUnixTimestamp64Milli(1707486436029))`, + "(`timestamp`>=fromUnixTimestamp64Milli(1706881636029) AND `timestamp`<=fromUnixTimestamp64Milli(1707486436029))", }, { "parseDateTimeBestEffort", @@ -55,7 +55,7 @@ var parseRangeTests = []parseRangeTest{ }, Config: database_common.NewNoTimestampOnlyStringAttrCHConfig(), }, - `("timestamp">=fromUnixTimestamp(1706881636) AND "timestamp"<=fromUnixTimestamp(1707486436))`, + "(`timestamp`>=fromUnixTimestamp(1706881636) AND `timestamp`<=fromUnixTimestamp(1707486436))", }, { "numeric range", @@ -72,7 +72,7 @@ var parseRangeTests = []parseRangeTest{ }, Config: database_common.NewNoTimestampOnlyStringAttrCHConfig(), }, - `"time_taken">100`, + "`time_taken`>100", }, { "DateTime64", @@ -91,7 +91,7 @@ var parseRangeTests = []parseRangeTest{ }, Config: database_common.NewNoTimestampOnlyStringAttrCHConfig(), }, - `("timestamp">=fromUnixTimestamp64Milli(1706881636000) AND "timestamp"<=fromUnixTimestamp64Milli(1707486436000))`, + 
"(`timestamp`>=fromUnixTimestamp64Milli(1706881636000) AND `timestamp`<=fromUnixTimestamp64Milli(1707486436000))", }, } diff --git a/platform/testdata/aggregation_requests.go b/platform/testdata/aggregation_requests.go index 572036db6..17c365a72 100644 --- a/platform/testdata/aggregation_requests.go +++ b/platform/testdata/aggregation_requests.go @@ -16,7 +16,7 @@ func groupBySQL(fieldName string, typ database_common.DateTimeType, groupByInter return model.AsString(database_common.TimestampGroupBy(model.NewColumnRef(fieldName), typ, groupByInterval)) } -const fullTextFieldName = `"` + model.FullTextFieldNamePlaceHolder + `"` +const fullTextFieldName = "`" + model.FullTextFieldNamePlaceHolder + "`" // TODO change some tests to size > 0, and track_total_hits different values var AggregationTests = []AggregationTestCase{ @@ -118,10 +118,7 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("metric__minAgg_col_0", 100.14596557617188), }}, }, - ExpectedPancakeSQL: `SELECT maxOrNull("AvgTicketPrice") AS "metric__maxAgg_col_0", ` + - `minOrNull("AvgTicketPrice") AS "metric__minAgg_col_0" ` + - `FROM ` + TableName + ` ` + - `WHERE ("timestamp">=fromUnixTimestamp64Milli(1706881636029) AND "timestamp"<=fromUnixTimestamp64Milli(1707486436029))`, + ExpectedPancakeSQL: "SELECT maxOrNull(`AvgTicketPrice`) AS `metric__maxAgg_col_0`,\n minOrNull(`AvgTicketPrice`) AS `metric__minAgg_col_0`\nFROM `__quesma_table_name`\nWHERE (`timestamp`>=fromUnixTimestamp64Milli(1706881636029) AND `timestamp`<=\n fromUnixTimestamp64Milli(1707486436029))", }, { // [1] TestName: "2 sibling count aggregations", @@ -306,16 +303,7 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("metric__0__3-bucket_col_0", 2), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "OriginCityName" AS "aggr__0__key_0", count(*) AS "aggr__0__count", - countIf("FlightDelay" __quesma_match true) AS "metric__0__1-bucket_col_0", - 
countIf("Cancelled" __quesma_match true) AS "metric__0__3-bucket_col_0" - FROM ` + TableName + ` - WHERE ("timestamp">=fromUnixTimestamp64Milli(1706881636029) AND "timestamp"<=fromUnixTimestamp64Milli(1707486436029)) - GROUP BY "OriginCityName" AS "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC - LIMIT 1001`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n `OriginCityName` AS `aggr__0__key_0`, count(*) AS `aggr__0__count`,\n countIf(`FlightDelay` __quesma_match true) AS `metric__0__1-bucket_col_0`,\n countIf(`Cancelled` __quesma_match true) AS `metric__0__3-bucket_col_0`\n FROM `__quesma_table_name`\n WHERE (`timestamp`>=fromUnixTimestamp64Milli(1706881636029) AND `timestamp`<=\n fromUnixTimestamp64Milli(1707486436029))\n GROUP BY `OriginCityName` AS `aggr__0__key_0`\n ORDER BY `aggr__0__key_0` ASC\n LIMIT 1001", }, { // [2] TestName: "date_histogram + size as string", @@ -497,30 +485,29 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("aggr__0__1__count", uint64(2)), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__count", - "aggr__0__1__key_0", "aggr__0__1__count" - FROM ( - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__count", - "aggr__0__1__key_0", "aggr__0__1__count", - dense_rank() OVER (ORDER BY "aggr__0__count" DESC, "aggr__0__key_0" ASC) - AS "aggr__0__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__0__key_0" ORDER BY - "aggr__0__1__key_0" ASC) AS "aggr__0__1__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "FlightDelayType" AS "aggr__0__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS "aggr__0__count", - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__0__1__key_0", - count(*) AS "aggr__0__1__count" - FROM ` + TableName + ` - WHERE ("timestamp">=fromUnixTimestamp64Milli(1706881636029) AND 
"timestamp"<=fromUnixTimestamp64Milli(1707486436029)) - GROUP BY "FlightDelayType" AS "aggr__0__key_0", - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__0__1__key_0")) - WHERE "aggr__0__order_1_rank"<=11 - ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__1__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__count`,\n" + + " `aggr__0__1__key_0`, `aggr__0__1__count`\n" + + "FROM (\n" + + " SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__count`,\n" + + " `aggr__0__1__key_0`, `aggr__0__1__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__0__count` DESC, `aggr__0__key_0` ASC)\n" + + " AS `aggr__0__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__0__key_0` ORDER BY\n" + + " `aggr__0__1__key_0` ASC) AS `aggr__0__1__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n" + + " `FlightDelayType` AS `aggr__0__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS `aggr__0__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS `aggr__0__1__key_0`,\n" + + " count(*) AS `aggr__0__1__count`\n" + + " FROM `" + TableName + "`\n" + + " WHERE (`timestamp`>=fromUnixTimestamp64Milli(1706881636029) AND `timestamp`<=fromUnixTimestamp64Milli(1707486436029))\n" + + " GROUP BY `FlightDelayType` AS `aggr__0__key_0`,\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS `aggr__0__1__key_0`))\n" + + "WHERE `aggr__0__order_1_rank`<=11\n" + + "ORDER BY `aggr__0__order_1_rank` ASC, `aggr__0__1__order_1_rank` ASC", }, { // [3] TestName: "Sum", @@ -613,9 +600,7 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("metric__0_col_0", 76631.67578125), }}, }, - ExpectedPancakeSQL: 
`SELECT sumOrNull("taxful_total_price") AS "metric__0_col_0" ` + - `FROM ` + TableName + ` ` + - `WHERE ("order_date">=fromUnixTimestamp64Milli(1707213597034) AND "order_date"<=fromUnixTimestamp64Milli(1707818397034))`, + ExpectedPancakeSQL: "SELECT sumOrNull(`taxful_total_price`) AS `metric__0_col_0`\n FROM `__quesma_table_name`\n WHERE (`order_date`>=fromUnixTimestamp64Milli(1707213597034) AND `order_date`<=\n fromUnixTimestamp64Milli(1707818397034))", }, { // [4] TestName: "cardinality", @@ -740,17 +725,7 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("aggr__suggestions__count", uint64(32)), }}, }, - ExpectedPancakeSQL: ` - SELECT uniqMerge(uniqState("OriginCityName")) OVER () AS - "metric__unique_terms_col_0", - sum(count(*)) OVER () AS "aggr__suggestions__parent_count", - "OriginCityName" AS "aggr__suggestions__key_0", - count(*) AS "aggr__suggestions__count" - FROM ` + TableName + ` - WHERE ("timestamp">=fromUnixTimestamp64Milli(1706881636029) AND "timestamp"<=fromUnixTimestamp64Milli(1707486436029)) - GROUP BY "OriginCityName" AS "aggr__suggestions__key_0" - ORDER BY "aggr__suggestions__count" DESC, "aggr__suggestions__key_0" ASC - LIMIT 11`, + ExpectedPancakeSQL: "SELECT uniqMerge(uniqState(`OriginCityName`)) OVER () AS `\n metric__unique_terms_col_0`,\n sum(count(*)) OVER () AS `aggr__suggestions__parent_count`,\n `OriginCityName` AS `aggr__suggestions__key_0`,\n count(*) AS `aggr__suggestions__count`\n FROM `__quesma_table_name`\n WHERE (`timestamp`>=fromUnixTimestamp64Milli(1706881636029) AND `timestamp`<=\n fromUnixTimestamp64Milli(1707486436029))\n GROUP BY `OriginCityName` AS `aggr__suggestions__key_0`\n ORDER BY `aggr__suggestions__count` DESC, `aggr__suggestions__key_0` ASC\n LIMIT 11", }, { // [5] TestName: "simple filter/count", @@ -860,10 +835,7 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("metric__0-bucket_col_0", uint64(553)), }}, }, - ExpectedPancakeSQL: ` - SELECT countIf("FlightDelay" 
__quesma_match true) AS "metric__0-bucket_col_0" - FROM ` + TableName + ` - WHERE ("timestamp">=fromUnixTimestamp64Milli(1706881636029) AND "timestamp"<=fromUnixTimestamp64Milli(1707486436029))`, + ExpectedPancakeSQL: "SELECT countIf(`FlightDelay` __quesma_match true) AS `metric__0-bucket_col_0`\n FROM `__quesma_table_name`\n WHERE (`timestamp`>=fromUnixTimestamp64Milli(1706881636029) AND `timestamp`<=\n fromUnixTimestamp64Milli(1707486436029))", }, { // [6] TestName: "filters", @@ -1016,18 +988,16 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("filter_1__aggr__time_offset_split__count", uint64(351)), }}, }, - ExpectedPancakeSQL: ` - SELECT countIf(("timestamp">=fromUnixTimestamp64Milli(1706881636029) AND - "timestamp"<=fromUnixTimestamp64Milli(1707486436029))) AS - "filter_0__aggr__time_offset_split__count", - countIf(("timestamp">=fromUnixTimestamp64Milli(1706276836029) AND "timestamp" - <=fromUnixTimestamp64Milli(1706881636029))) AS - "filter_1__aggr__time_offset_split__count" - FROM __quesma_table_name - WHERE ("FlightDelay" __quesma_match true AND (("timestamp">=fromUnixTimestamp64Milli( - 1706881636029) AND "timestamp"<=fromUnixTimestamp64Milli(1707486436029)) OR ( - "timestamp">=fromUnixTimestamp64Milli(1706276836029) AND "timestamp"<= - fromUnixTimestamp64Milli(1706881636029))))`, + ExpectedPancakeSQL: "SELECT countIf((`timestamp`>=fromUnixTimestamp64Milli(1706881636029) AND\n" + + " `timestamp`<=fromUnixTimestamp64Milli(1707486436029))) AS\n" + + " `filter_0__aggr__time_offset_split__count`,\n" + + " countIf((`timestamp`>=fromUnixTimestamp64Milli(1706276836029) AND `timestamp`<=fromUnixTimestamp64Milli(1706881636029))) AS\n" + + " `filter_1__aggr__time_offset_split__count`\n" + + "FROM `" + TableName + "`\n" + + "WHERE (`FlightDelay` __quesma_match true AND ((`timestamp`>=fromUnixTimestamp64Milli(\n" + + " 1706881636029) AND `timestamp`<=fromUnixTimestamp64Milli(1707486436029)) OR (\n" + + " 
`timestamp`>=fromUnixTimestamp64Milli(1706276836029) AND `timestamp`<=\n" + + " fromUnixTimestamp64Milli(1706881636029))))", }, { // [7] TestName: "top hits, quite complex", @@ -1389,100 +1359,97 @@ var AggregationTests = []AggregationTestCase{ }, }, }, - ExpectedPancakeSQL: ` - WITH quesma_top_hits_group_table AS ( - SELECT "aggr__origins__parent_count", "aggr__origins__key_0", - "aggr__origins__count", "aggr__origins__distinations__parent_count", - "aggr__origins__distinations__key_0", "aggr__origins__distinations__count", - "aggr__origins__order_1_rank", "aggr__origins__distinations__order_1_rank" - FROM ( - SELECT "aggr__origins__parent_count", "aggr__origins__key_0", - "aggr__origins__count", "aggr__origins__distinations__parent_count", - "aggr__origins__distinations__key_0", - "aggr__origins__distinations__count", - dense_rank() OVER (ORDER BY "aggr__origins__count" DESC, - "aggr__origins__key_0" ASC) AS "aggr__origins__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__origins__key_0" ORDER BY - "aggr__origins__distinations__count" DESC, - "aggr__origins__distinations__key_0" ASC) AS - "aggr__origins__distinations__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__origins__parent_count", - "OriginAirportID" AS "aggr__origins__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__origins__key_0") AS - "aggr__origins__count", - sum(count(*)) OVER (PARTITION BY "aggr__origins__key_0") AS - "aggr__origins__distinations__parent_count", - "DestAirportID" AS "aggr__origins__distinations__key_0", - count(*) AS "aggr__origins__distinations__count" - FROM __quesma_table_name - GROUP BY "OriginAirportID" AS "aggr__origins__key_0", - "DestAirportID" AS "aggr__origins__distinations__key_0")) - WHERE ("aggr__origins__order_1_rank"<=10001 AND - "aggr__origins__distinations__order_1_rank"<=10001) - ORDER BY "aggr__origins__order_1_rank" ASC, - "aggr__origins__distinations__order_1_rank" ASC) , - quesma_top_hits_join AS ( - SELECT 
"group_table"."aggr__origins__parent_count" AS - "aggr__origins__parent_count", - "group_table"."aggr__origins__key_0" AS "aggr__origins__key_0", - "group_table"."aggr__origins__count" AS "aggr__origins__count", - "group_table"."aggr__origins__distinations__parent_count" AS - "aggr__origins__distinations__parent_count", - "group_table"."aggr__origins__distinations__key_0" AS - "aggr__origins__distinations__key_0", - "group_table"."aggr__origins__distinations__count" AS - "aggr__origins__distinations__count", - "hit_table"."DestLocation" AS - "top_hits__origins__distinations__destLocation_col_0", - ROW_NUMBER() OVER (PARTITION BY "group_table"."aggr__origins__key_0", - "group_table"."aggr__origins__distinations__key_0") AS "top_hits_rank", - "group_table"."aggr__origins__order_1_rank" AS "aggr__origins__order_1_rank" - , - "group_table"."aggr__origins__distinations__order_1_rank" AS - "aggr__origins__distinations__order_1_rank" - FROM quesma_top_hits_group_table AS "group_table" LEFT OUTER JOIN - __quesma_table_name AS "hit_table" ON (("group_table"."aggr__origins__key_0" - ="hit_table"."OriginAirportID" AND - "group_table"."aggr__origins__distinations__key_0"= - "hit_table"."DestAirportID"))) - SELECT "aggr__origins__parent_count", "aggr__origins__key_0", - "aggr__origins__count", "aggr__origins__distinations__parent_count", - "aggr__origins__distinations__key_0", "aggr__origins__distinations__count", - "top_hits__origins__distinations__destLocation_col_0", "top_hits_rank" - FROM "quesma_top_hits_join" - WHERE "top_hits_rank"<=1 - ORDER BY "aggr__origins__order_1_rank" ASC, - "aggr__origins__distinations__order_1_rank" ASC, "top_hits_rank" ASC`, - ExpectedAdditionalPancakeSQLs: []string{` - WITH quesma_top_hits_group_table AS ( - SELECT sum(count(*)) OVER () AS "aggr__origins__parent_count", - "OriginAirportID" AS "aggr__origins__key_0", - count(*) AS "aggr__origins__count" - FROM __quesma_table_name - GROUP BY "OriginAirportID" AS "aggr__origins__key_0" - ORDER BY 
"aggr__origins__count" DESC, "aggr__origins__key_0" ASC - LIMIT 10001) , - quesma_top_hits_join AS ( - SELECT "group_table"."aggr__origins__parent_count" AS - "aggr__origins__parent_count", - "group_table"."aggr__origins__key_0" AS "aggr__origins__key_0", - "group_table"."aggr__origins__count" AS "aggr__origins__count", - "hit_table"."OriginLocation" AS - "top_hits__origins__originLocation_col_0", - "hit_table"."Origin" AS "top_hits__origins__originLocation_col_1", - ROW_NUMBER() OVER (PARTITION BY "group_table"."aggr__origins__key_0") AS - "top_hits_rank" - FROM quesma_top_hits_group_table AS "group_table" LEFT OUTER JOIN - __quesma_table_name AS "hit_table" ON ("group_table"."aggr__origins__key_0"= - "hit_table"."OriginAirportID")) - SELECT "aggr__origins__parent_count", "aggr__origins__key_0", - "aggr__origins__count", "top_hits__origins__originLocation_col_0", - "top_hits__origins__originLocation_col_1", "top_hits_rank" - FROM "quesma_top_hits_join" - WHERE "top_hits_rank"<=1 - ORDER BY "aggr__origins__count" DESC, "aggr__origins__key_0" ASC, - "top_hits_rank" ASC`}, + ExpectedPancakeSQL: "WITH quesma_top_hits_group_table AS (\n" + + " SELECT `aggr__origins__parent_count`, `aggr__origins__key_0`,\n" + + " `aggr__origins__count`, `aggr__origins__distinations__parent_count`,\n" + + " `aggr__origins__distinations__key_0`, `aggr__origins__distinations__count`,\n" + + " `aggr__origins__order_1_rank`, `aggr__origins__distinations__order_1_rank`\n" + + " FROM (\n" + + " SELECT `aggr__origins__parent_count`, `aggr__origins__key_0`,\n" + + " `aggr__origins__count`, `aggr__origins__distinations__parent_count`,\n" + + " `aggr__origins__distinations__key_0`,\n" + + " `aggr__origins__distinations__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__origins__count` DESC,\n" + + " `aggr__origins__key_0` ASC) AS `aggr__origins__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__origins__key_0` ORDER BY\n" + + " `aggr__origins__distinations__count` DESC,\n" + + " 
`aggr__origins__distinations__key_0` ASC) AS\n" + + " `aggr__origins__distinations__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__origins__parent_count`,\n" + + " `OriginAirportID` AS `aggr__origins__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__origins__key_0`) AS\n" + + " `aggr__origins__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__origins__key_0`) AS\n" + + " `aggr__origins__distinations__parent_count`,\n" + + " `DestAirportID` AS `aggr__origins__distinations__key_0`,\n" + + " count(*) AS `aggr__origins__distinations__count`\n" + + " FROM `" + TableName + "`\n" + + " GROUP BY `OriginAirportID` AS `aggr__origins__key_0`,\n" + + " `DestAirportID` AS `aggr__origins__distinations__key_0`))\n" + + " WHERE (`aggr__origins__order_1_rank`<=10001 AND\n" + + " `aggr__origins__distinations__order_1_rank`<=10001)\n" + + " ORDER BY `aggr__origins__order_1_rank` ASC,\n" + + " `aggr__origins__distinations__order_1_rank` ASC) ,\n" + + "quesma_top_hits_join AS (\n" + + " SELECT `group_table`.`aggr__origins__parent_count` AS\n" + + " `aggr__origins__parent_count`,\n" + + " `group_table`.`aggr__origins__key_0` AS `aggr__origins__key_0`,\n" + + " `group_table`.`aggr__origins__count` AS `aggr__origins__count`,\n" + + " `group_table`.`aggr__origins__distinations__parent_count` AS\n" + + " `aggr__origins__distinations__parent_count`,\n" + + " `group_table`.`aggr__origins__distinations__key_0` AS\n" + + " `aggr__origins__distinations__key_0`,\n" + + " `group_table`.`aggr__origins__distinations__count` AS\n" + + " `aggr__origins__distinations__count`,\n" + + " `hit_table`.`DestLocation` AS \n" + + " `top_hits__origins__distinations__destLocation_col_0`,\n" + + " ROW_NUMBER() OVER (PARTITION BY `group_table`.`aggr__origins__key_0`,\n" + + " `group_table`.`aggr__origins__distinations__key_0`) AS `top_hits_rank`,\n" + + " `group_table`.`aggr__origins__order_1_rank` AS `aggr__origins__order_1_rank`\n" + + " ,\n" + + " 
`group_table`.`aggr__origins__distinations__order_1_rank` AS\n" + + " `aggr__origins__distinations__order_1_rank`\n" + + " FROM quesma_top_hits_group_table AS `group_table` LEFT OUTER JOIN\n" + + " `" + TableName + "` AS `hit_table` ON ((`group_table`.`aggr__origins__key_0`=`hit_table`.`OriginAirportID` AND\n" + + " `group_table`.`aggr__origins__distinations__key_0`=`hit_table`.`DestAirportID`)))\n" + + "SELECT `aggr__origins__parent_count`, `aggr__origins__key_0`,\n" + + " `aggr__origins__count`, `aggr__origins__distinations__parent_count`,\n" + + " `aggr__origins__distinations__key_0`, `aggr__origins__distinations__count`,\n" + + " `top_hits__origins__distinations__destLocation_col_0`, `top_hits_rank`\n" + + "FROM `quesma_top_hits_join`\n" + + "WHERE `top_hits_rank`<=1\n" + + "ORDER BY `aggr__origins__order_1_rank` ASC,\n" + + " `aggr__origins__distinations__order_1_rank` ASC, `top_hits_rank` ASC", + ExpectedAdditionalPancakeSQLs: []string{ + "WITH quesma_top_hits_group_table AS (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__origins__parent_count`,\n" + + " `OriginAirportID` AS `aggr__origins__key_0`,\n" + + " count(*) AS `aggr__origins__count`\n" + + " FROM `" + TableName + "`\n" + + " GROUP BY `OriginAirportID` AS `aggr__origins__key_0`\n" + + " ORDER BY `aggr__origins__count` DESC, `aggr__origins__key_0` ASC\n" + + " LIMIT 10001) ,\n" + + "quesma_top_hits_join AS (\n" + + " SELECT `group_table`.`aggr__origins__parent_count` AS\n" + + " `aggr__origins__parent_count`,\n" + + " `group_table`.`aggr__origins__key_0` AS `aggr__origins__key_0`,\n" + + " `group_table`.`aggr__origins__count` AS `aggr__origins__count`,\n" + + " `hit_table`.`OriginLocation` AS\n" + + " `top_hits__origins__originLocation_col_0`,\n" + + " `hit_table`.`Origin` AS `top_hits__origins__originLocation_col_1`,\n" + + " ROW_NUMBER() OVER (PARTITION BY `group_table`.`aggr__origins__key_0`) AS\n" + + " `top_hits_rank`\n" + + " FROM quesma_top_hits_group_table AS `group_table` LEFT OUTER JOIN\n" 
+ + " `" + TableName + "` AS `hit_table` ON (`group_table`.`aggr__origins__key_0`=`hit_table`.`OriginAirportID`))\n" + + "SELECT `aggr__origins__parent_count`, `aggr__origins__key_0`,\n" + + " `aggr__origins__count`, `top_hits__origins__originLocation_col_0`,\n" + + " `top_hits__origins__originLocation_col_1`, `top_hits_rank`\n" + + "FROM `quesma_top_hits_join`\n" + + "WHERE `top_hits_rank`<=1\n" + + "ORDER BY `aggr__origins__count` DESC, `aggr__origins__key_0` ASC,\n" + + " `top_hits_rank` ASC", + }, }, { // [8] TestName: "histogram, different field than timestamp", @@ -1614,13 +1581,7 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("aggr__0__count", 22), }}, }, - ExpectedPancakeSQL: ` - SELECT "FlightDelayMin" AS "aggr__0__key_0", count(*) AS "aggr__0__count" - FROM ` + TableName + ` - WHERE (("timestamp">=fromUnixTimestamp64Milli(1706881636029) AND "timestamp"<= - fromUnixTimestamp64Milli(1707486436029)) AND NOT ("FlightDelayMin" __quesma_match 0)) - GROUP BY "FlightDelayMin" AS "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, + ExpectedPancakeSQL: "SELECT `FlightDelayMin` AS `aggr__0__key_0`, count(*) AS `aggr__0__count`\n FROM `__quesma_table_name`\n WHERE ((`timestamp`>=fromUnixTimestamp64Milli(1706881636029) AND `timestamp`<=\n fromUnixTimestamp64Milli(1707486436029)) AND NOT (`FlightDelayMin`\n __quesma_match 0))\n GROUP BY `FlightDelayMin` AS `aggr__0__key_0`\n ORDER BY `aggr__0__key_0` ASC", }, { // [9] TestName: "double aggregation with histogram + harder query", @@ -1822,32 +1783,7 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("aggr__0__1__count", 11), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__count", - "aggr__0__1__key_0", "aggr__0__1__count" - FROM ( - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__count", - "aggr__0__1__key_0", "aggr__0__1__count", - dense_rank() OVER (ORDER BY "aggr__0__count" DESC, "aggr__0__key_0" ASC) AS - 
"aggr__0__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__0__key_0" ORDER BY - "aggr__0__1__key_0" ASC) AS "aggr__0__1__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "severity" AS "aggr__0__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS "aggr__0__count", - toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( - "@timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__0__1__key_0", - count(*) AS "aggr__0__1__count" - FROM __quesma_table_name - WHERE ("host.name" __quesma_match '%prometheus%' AND ("@timestamp">= - fromUnixTimestamp64Milli(1706891809940) AND "@timestamp"<= - fromUnixTimestamp64Milli(1707496609940))) - GROUP BY "severity" AS "aggr__0__key_0", - toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( - "@timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__0__1__key_0")) - WHERE "aggr__0__order_1_rank"<=4 - ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__1__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__count`,\n `aggr__0__1__key_0`, `aggr__0__1__count`\n FROM (\n SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__count`,\n `aggr__0__1__key_0`, `aggr__0__1__count`,\n dense_rank() OVER (ORDER BY `aggr__0__count` DESC, `aggr__0__key_0` ASC) AS\n `aggr__0__order_1_rank`,\n dense_rank() OVER (PARTITION BY `aggr__0__key_0` ORDER BY `aggr__0__1__key_0\n ` ASC) AS `aggr__0__1__order_1_rank`\n FROM (\n SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n `severity` AS `aggr__0__key_0`,\n sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS `aggr__0__count`,\n toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(toTimezone(`@\n timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS `aggr__0__1__key_0`,\n count(*) AS `aggr__0__1__count`\n FROM `__quesma_table_name`\n WHERE (`host.name` __quesma_match '%prometheus%' AND (`@timestamp`>=\n fromUnixTimestamp64Milli(1706891809940) AND 
`@timestamp`<=\n fromUnixTimestamp64Milli(1707496609940)))\n GROUP BY `severity` AS `aggr__0__key_0`,\n toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(toTimezone(`@\n timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS `aggr__0__1__key_0`))\n WHERE `aggr__0__order_1_rank`<=4\n ORDER BY `aggr__0__order_1_rank` ASC, `aggr__0__1__order_1_rank` ASC", }, { // [10] TestName: "very long: multiple top_metrics + histogram", @@ -2184,80 +2120,75 @@ var AggregationTests = []AggregationTestCase{ }}, }, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__1__count", - toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone( - "order_date", 'Europe/Warsaw'))*1000) / 43200000) AS "aggr__1__2__key_0", - count(*) AS "aggr__1__2__count" - FROM __quesma_table_name - WHERE (("order_date">=fromUnixTimestamp64Milli(1707213597034) AND "order_date"<= - fromUnixTimestamp64Milli(1707818397034)) AND "taxful_total_price" > '250') - GROUP BY toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone - ("order_date", 'Europe/Warsaw'))*1000) / 43200000) AS "aggr__1__2__key_0" - ORDER BY "aggr__1__2__key_0" ASC`, - ExpectedAdditionalPancakeSQLs: []string{` - WITH quesma_top_hits_group_table AS ( - SELECT sum(count(*)) OVER () AS "aggr__1__count", - toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone( - "order_date", 'Europe/Warsaw'))*1000) / 43200000) AS "aggr__1__2__key_0", - count(*) AS "aggr__1__2__count" - FROM __quesma_table_name - WHERE (("order_date">=fromUnixTimestamp64Milli(1707213597034) AND "order_date" - <=fromUnixTimestamp64Milli(1707818397034)) AND "taxful_total_price" > '250') - GROUP BY toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset( - toTimezone("order_date", 'Europe/Warsaw'))*1000) / 43200000) AS - "aggr__1__2__key_0" - ORDER BY "aggr__1__2__key_0" ASC) , - quesma_top_hits_join AS ( - SELECT "group_table"."aggr__1__count" AS "aggr__1__count", - "group_table"."aggr__1__2__key_0" AS 
"aggr__1__2__key_0", - "group_table"."aggr__1__2__count" AS "aggr__1__2__count", - "hit_table"."order_date" AS "top_metrics__1__2__4_col_0", - "hit_table"."order_date" AS "top_metrics__1__2__4_col_1", - ROW_NUMBER() OVER (PARTITION BY "group_table"."aggr__1__2__key_0" ORDER BY - "order_date" ASC) AS "top_hits_rank" - FROM quesma_top_hits_group_table AS "group_table" LEFT OUTER JOIN - __quesma_table_name AS "hit_table" ON ("group_table"."aggr__1__2__key_0"= - toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone( - "order_date", 'Europe/Warsaw'))*1000) / 43200000)) - WHERE (("order_date">=fromUnixTimestamp64Milli(1707213597034) AND "order_date" - <=fromUnixTimestamp64Milli(1707818397034)) AND "taxful_total_price" > '250')) - SELECT "aggr__1__count", "aggr__1__2__key_0", "aggr__1__2__count", - "top_metrics__1__2__4_col_0", "top_metrics__1__2__4_col_1", "top_hits_rank" - FROM "quesma_top_hits_join" - WHERE "top_hits_rank"<=10 - ORDER BY "aggr__1__2__key_0" ASC, "top_hits_rank" ASC`, - `WITH quesma_top_hits_group_table AS ( - SELECT sum(count(*)) OVER () AS "aggr__1__count", - toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone( - "order_date", 'Europe/Warsaw'))*1000) / 43200000) AS "aggr__1__2__key_0", - count(*) AS "aggr__1__2__count" - FROM __quesma_table_name - WHERE (("order_date">=fromUnixTimestamp64Milli(1707213597034) AND "order_date" - <=fromUnixTimestamp64Milli(1707818397034)) AND "taxful_total_price" > '250') - GROUP BY toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset( - toTimezone("order_date", 'Europe/Warsaw'))*1000) / 43200000) AS - "aggr__1__2__key_0" - ORDER BY "aggr__1__2__key_0" ASC) , - quesma_top_hits_join AS ( - SELECT "group_table"."aggr__1__count" AS "aggr__1__count", - "group_table"."aggr__1__2__key_0" AS "aggr__1__2__key_0", - "group_table"."aggr__1__2__count" AS "aggr__1__2__count", - "hit_table"."taxful_total_price" AS "top_metrics__1__2__5_col_0", - "hit_table"."order_date" AS 
"top_metrics__1__2__5_col_1", - ROW_NUMBER() OVER (PARTITION BY "group_table"."aggr__1__2__key_0" ORDER BY - "order_date" ASC) AS "top_hits_rank" - FROM quesma_top_hits_group_table AS "group_table" LEFT OUTER JOIN - __quesma_table_name AS "hit_table" ON ("group_table"."aggr__1__2__key_0"= - toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone( - "order_date", 'Europe/Warsaw'))*1000) / 43200000)) - WHERE (("order_date">=fromUnixTimestamp64Milli(1707213597034) AND "order_date" - <=fromUnixTimestamp64Milli(1707818397034)) AND "taxful_total_price" > '250')) - SELECT "aggr__1__count", "aggr__1__2__key_0", "aggr__1__2__count", - "top_metrics__1__2__5_col_0", "top_metrics__1__2__5_col_1", "top_hits_rank" - FROM "quesma_top_hits_join" - WHERE "top_hits_rank"<=10 - ORDER BY "aggr__1__2__key_0" ASC, "top_hits_rank" ASC`}, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__1__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone(\n" + + " `order_date`, 'Europe/Warsaw'))*1000) / 43200000) AS `aggr__1__2__key_0`,\n" + + " count(*) AS `aggr__1__2__count`\n" + + "FROM `" + TableName + "`\n" + + "WHERE ((`order_date`>=fromUnixTimestamp64Milli(1707213597034) AND `order_date`<=fromUnixTimestamp64Milli(1707818397034)) AND `taxful_total_price` > '250')\n" + + "GROUP BY toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(\n" + + " toTimezone(`order_date`, 'Europe/Warsaw'))*1000) / 43200000) AS `aggr__1__2__key_0`\n" + + "ORDER BY `aggr__1__2__key_0` ASC", + ExpectedAdditionalPancakeSQLs: []string{ + "WITH quesma_top_hits_group_table AS (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__1__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone(\n" + + " `order_date`, 'Europe/Warsaw'))*1000) / 43200000) AS `aggr__1__2__key_0`,\n" + + " count(*) AS `aggr__1__2__count`\n" + + " FROM `" + TableName + "`\n" + + " WHERE ((`order_date`>=fromUnixTimestamp64Milli(1707213597034) AND 
`order_date`<=fromUnixTimestamp64Milli(1707818397034)) AND `taxful_total_price` > '250')\n" + + " GROUP BY toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(\n" + + " toTimezone(`order_date`, 'Europe/Warsaw'))*1000) / 43200000) AS\n" + + " `aggr__1__2__key_0`\n" + + " ORDER BY `aggr__1__2__key_0` ASC) ,\n" + + "quesma_top_hits_join AS (\n" + + " SELECT `group_table`.`aggr__1__count` AS `aggr__1__count`,\n" + + " `group_table`.`aggr__1__2__key_0` AS `aggr__1__2__key_0`,\n" + + " `group_table`.`aggr__1__2__count` AS `aggr__1__2__count`,\n" + + " `hit_table`.`order_date` AS `top_metrics__1__2__4_col_0`,\n" + + " `hit_table`.`order_date` AS `top_metrics__1__2__4_col_1`,\n" + + " ROW_NUMBER() OVER (PARTITION BY `group_table`.`aggr__1__2__key_0` ORDER BY\n" + + " `order_date` ASC) AS `top_hits_rank`\n" + + " FROM quesma_top_hits_group_table AS `group_table` LEFT OUTER JOIN\n" + + " `" + TableName + "` AS `hit_table` ON (`group_table`.`aggr__1__2__key_0`=\n" + + " toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone(\n" + + " `order_date`, 'Europe/Warsaw'))*1000) / 43200000))\n" + + " WHERE ((`order_date`>=fromUnixTimestamp64Milli(1707213597034) AND `order_date`<=fromUnixTimestamp64Milli(1707818397034)) AND `taxful_total_price` > '250'))\n" + + "SELECT `aggr__1__count`, `aggr__1__2__key_0`, `aggr__1__2__count`,\n" + + " `top_metrics__1__2__4_col_0`, `top_metrics__1__2__4_col_1`, `top_hits_rank`\n" + + "FROM `quesma_top_hits_join`\n" + + "WHERE `top_hits_rank`<=10\n" + + "ORDER BY `aggr__1__2__key_0` ASC, `top_hits_rank` ASC", + "WITH quesma_top_hits_group_table AS (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__1__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone(\n" + + " `order_date`, 'Europe/Warsaw'))*1000) / 43200000) AS `aggr__1__2__key_0`,\n" + + " count(*) AS `aggr__1__2__count`\n" + + " FROM `" + TableName + "`\n" + + " WHERE ((`order_date`>=fromUnixTimestamp64Milli(1707213597034) AND 
`order_date`<=fromUnixTimestamp64Milli(1707818397034)) AND `taxful_total_price` > '250')\n" + + " GROUP BY toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(\n" + + " toTimezone(`order_date`, 'Europe/Warsaw'))*1000) / 43200000) AS\n" + + " `aggr__1__2__key_0`\n" + + " ORDER BY `aggr__1__2__key_0` ASC) ,\n" + + "quesma_top_hits_join AS (\n" + + " SELECT `group_table`.`aggr__1__count` AS `aggr__1__count`,\n" + + " `group_table`.`aggr__1__2__key_0` AS `aggr__1__2__key_0`,\n" + + " `group_table`.`aggr__1__2__count` AS `aggr__1__2__count`,\n" + + " `hit_table`.`taxful_total_price` AS `top_metrics__1__2__5_col_0`,\n" + + " `hit_table`.`order_date` AS `top_metrics__1__2__5_col_1`,\n" + + " ROW_NUMBER() OVER (PARTITION BY `group_table`.`aggr__1__2__key_0` ORDER BY\n" + + " `order_date` ASC) AS `top_hits_rank`\n" + + " FROM quesma_top_hits_group_table AS `group_table` LEFT OUTER JOIN\n" + + " `" + TableName + "` AS `hit_table` ON (`group_table`.`aggr__1__2__key_0`=\n" + + " toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone(\n" + + " `order_date`, 'Europe/Warsaw'))*1000) / 43200000))\n" + + " WHERE ((`order_date`>=fromUnixTimestamp64Milli(1707213597034) AND `order_date`<=fromUnixTimestamp64Milli(1707818397034)) AND `taxful_total_price` > '250'))\n" + + "SELECT `aggr__1__count`, `aggr__1__2__key_0`, `aggr__1__2__count`,\n" + + " `top_metrics__1__2__5_col_0`, `top_metrics__1__2__5_col_1`, `top_hits_rank`\n" + + "FROM `quesma_top_hits_join`\n" + + "WHERE `top_hits_rank`<=10\n" + + "ORDER BY `aggr__1__2__key_0` ASC, `top_hits_rank` ASC", + }, }, { // [11], "old" test, also can be found in testdata/requests.go TestAsyncSearch[0] // Copied it also here to be more sure we do not create some regression @@ -2442,23 +2373,21 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("aggr__sample__top_values__count", int64(21)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__sample__count", - 
sum(count("host.name")) OVER () AS "metric__sample__sample_count_col_0", - sum(count(*)) OVER () AS "aggr__sample__top_values__parent_count", - "host.name" AS "aggr__sample__top_values__key_0", - count(*) AS "aggr__sample__top_values__count" - FROM ( - SELECT "host.name" - FROM __quesma_table_name - WHERE (("@timestamp">=fromUnixTimestamp64Milli(1706009236820) AND "@timestamp" - <=fromUnixTimestamp64Milli(1706010136820)) AND - "__quesma_fulltext_field_name" iLIKE '%user%') - LIMIT 8000) - GROUP BY "host.name" AS "aggr__sample__top_values__key_0" - ORDER BY "aggr__sample__top_values__count" DESC, - "aggr__sample__top_values__key_0" ASC - LIMIT 11`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__sample__count`,\n" + + " sum(count(`host.name`)) OVER () AS `metric__sample__sample_count_col_0`,\n" + + " sum(count(*)) OVER () AS `aggr__sample__top_values__parent_count`,\n" + + " `host.name` AS `aggr__sample__top_values__key_0`,\n" + + " count(*) AS `aggr__sample__top_values__count`\n" + + "FROM (\n" + + " SELECT `host.name`\n" + + " FROM `" + TableName + "`" + + " WHERE ((`@timestamp`>=fromUnixTimestamp64Milli(1706009236820) AND `@timestamp`<=fromUnixTimestamp64Milli(1706010136820)) AND" + + " `__quesma_fulltext_field_name` iLIKE '%user%')\n" + + " LIMIT 8000)\n" + + "GROUP BY `host.name` AS `aggr__sample__top_values__key_0`\n" + + "ORDER BY `aggr__sample__top_values__count` DESC,\n" + + " `aggr__sample__top_values__key_0` ASC\n" + + "LIMIT 11", }, { // [12], "old" test, also can be found in testdata/requests.go TestAsyncSearch[3] // Copied it also here to be more sure we do not create some regression @@ -2591,16 +2520,7 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("aggr__0__count", 11), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS "aggr__0__key_0" - , count(*) AS "aggr__0__count" - FROM ` + TableName + ` - - WHERE (` + fullTextFieldName + ` iLIKE '%user%' AND - 
("@timestamp">=fromUnixTimestamp64Milli(1706020999481) AND "@timestamp"<=fromUnixTimestamp64Milli(1706021899481))) - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS - "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64(toUnixTimestamp64Milli(`@timestamp`) / 30000) AS `aggr__0__key_0`\n , count(*) AS `aggr__0__count`\n FROM `__quesma_table_name`\n WHERE (`__quesma_fulltext_field_name` iLIKE '%user%' AND (`@timestamp`>=\n fromUnixTimestamp64Milli(1706020999481) AND `@timestamp`<=\n fromUnixTimestamp64Milli(1706021899481)))\n GROUP BY toInt64(toUnixTimestamp64Milli(`@timestamp`) / 30000) AS `\n aggr__0__key_0`\n ORDER BY `aggr__0__key_0` ASC", }, { // [13], "old" test, also can be found in testdata/requests.go TestAsyncSearch[4] // Copied it also here to be more sure we do not create some regression @@ -2718,32 +2638,7 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("aggr__stats__series__count", 35), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__stats__parent_count", "aggr__stats__key_0", "aggr__stats__count", - "aggr__stats__series__key_0", "aggr__stats__series__count" - FROM ( - SELECT "aggr__stats__parent_count", "aggr__stats__key_0", - "aggr__stats__count", "aggr__stats__series__key_0", - "aggr__stats__series__count", - dense_rank() OVER (ORDER BY "aggr__stats__count" DESC, - "aggr__stats__key_0" ASC) AS "aggr__stats__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__stats__key_0" ORDER BY - "aggr__stats__series__key_0" ASC) AS "aggr__stats__series__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__stats__parent_count", - COALESCE("event.dataset", 'unknown') AS "aggr__stats__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__stats__key_0") AS - "aggr__stats__count", - toInt64(toUnixTimestamp64Milli("@timestamp") / 60000) AS - "aggr__stats__series__key_0", count(*) AS "aggr__stats__series__count" - FROM ` + TableName + ` - WHERE 
("@timestamp">fromUnixTimestamp64Milli(1706194439033) AND "@timestamp"<=fromUnixTimestamp64Milli(1706195339033)) - GROUP BY COALESCE("event.dataset", 'unknown') AS "aggr__stats__key_0", - toInt64(toUnixTimestamp64Milli("@timestamp") / 60000) AS - "aggr__stats__series__key_0")) - WHERE "aggr__stats__order_1_rank"<=4 - ORDER BY "aggr__stats__order_1_rank" ASC, - "aggr__stats__series__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__stats__parent_count`, `aggr__stats__key_0`, `aggr__stats__count`,\n `aggr__stats__series__key_0`, `aggr__stats__series__count`\n FROM (\n SELECT `aggr__stats__parent_count`, `aggr__stats__key_0`,\n `aggr__stats__count`, `aggr__stats__series__key_0`,\n `aggr__stats__series__count`,\n dense_rank() OVER (ORDER BY `aggr__stats__count` DESC, `aggr__stats__key_0`\n ASC) AS `aggr__stats__order_1_rank`,\n dense_rank() OVER (PARTITION BY `aggr__stats__key_0` ORDER BY `\n aggr__stats__series__key_0` ASC) AS `aggr__stats__series__order_1_rank`\n FROM (\n SELECT sum(count(*)) OVER () AS `aggr__stats__parent_count`,\n COALESCE(`event.dataset`, 'unknown') AS `aggr__stats__key_0`,\n sum(count(*)) OVER (PARTITION BY `aggr__stats__key_0`) AS `\n aggr__stats__count`,\n toInt64(toUnixTimestamp64Milli(`@timestamp`) / 60000) AS `\n aggr__stats__series__key_0`, count(*) AS `aggr__stats__series__count`\n FROM `__quesma_table_name`\n WHERE (`@timestamp`>fromUnixTimestamp64Milli(1706194439033) AND `@timestamp\n `<=fromUnixTimestamp64Milli(1706195339033))\n GROUP BY COALESCE(`event.dataset`, 'unknown') AS `aggr__stats__key_0`,\n toInt64(toUnixTimestamp64Milli(`@timestamp`) / 60000) AS `\n aggr__stats__series__key_0`))\n WHERE `aggr__stats__order_1_rank`<=4\n ORDER BY `aggr__stats__order_1_rank` ASC,\n `aggr__stats__series__order_1_rank` ASC", }, { // [14], "old" test, also can be found in testdata/requests.go TestAsyncSearch[5] // Copied it also here to be more sure we do not create some regression @@ -2840,13 +2735,7 @@ var AggregationTests = 
[]AggregationTestCase{ model.NewQueryResultCol("metric__latest_timestamp_col_0", nil), }}, }, - ExpectedPancakeSQL: ` - SELECT avgOrNull("@timestamp") AS "metric__average_timestamp_col_0", minOrNull( - "@timestamp") AS "metric__earliest_timestamp_col_0", maxOrNull("@timestamp") - AS "metric__latest_timestamp_col_0" - FROM ` + TableName + ` - WHERE ((` + fullTextFieldName + ` iLIKE '%posei%' AND "message" __quesma_match '%User logged out%') AND - "host.name" __quesma_match '%poseidon%')`, + ExpectedPancakeSQL: "SELECT avgOrNull(`@timestamp`) AS `metric__average_timestamp_col_0`,\n minOrNull(`@timestamp`) AS `metric__earliest_timestamp_col_0`,\n maxOrNull(`@timestamp`) AS `metric__latest_timestamp_col_0`\n FROM `__quesma_table_name`\n WHERE ((`__quesma_fulltext_field_name` iLIKE '%posei%' AND `message`\n __quesma_match '%User logged out%') AND `host.name` __quesma_match\n '%poseidon%')", }, { // [15] TestName: "date_histogram: regression test", @@ -2967,15 +2856,7 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("metric__0__1_col_0", 11116.45703125), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp64Milli("order_date") / 86400000) AS - "aggr__0__key_0", count(*) AS "aggr__0__count", - sumOrNull("taxful_total_price") AS "metric__0__1_col_0" - FROM ` + TableName + ` - WHERE ("order_date">=fromUnixTimestamp64Milli(1708364456351) AND "order_date"<=fromUnixTimestamp64Milli(1708969256351)) - GROUP BY toInt64(toUnixTimestamp64Milli("order_date") / 86400000) AS - "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64(toUnixTimestamp64Milli(`order_date`) / 86400000) AS `\n aggr__0__key_0`, count(*) AS `aggr__0__count`,\n sumOrNull(`taxful_total_price`) AS `metric__0__1_col_0`\n FROM `__quesma_table_name`\n WHERE (`order_date`>=fromUnixTimestamp64Milli(1708364456351) AND `order_date`<=\n fromUnixTimestamp64Milli(1708969256351))\n GROUP BY toInt64(toUnixTimestamp64Milli(`order_date`) / 86400000) AS `\n 
aggr__0__key_0`\n ORDER BY `aggr__0__key_0` ASC", }, { // [16] TestName: "simple terms, seen at client's", @@ -3078,15 +2959,7 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("aggr__0__count", int64(1757)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "message" AS "aggr__0__key_0", count(*) AS "aggr__0__count" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1708456413795) AND "timestamp"<= - fromUnixTimestamp64Milli(1708488074920)) - GROUP BY "message" AS "aggr__0__key_0" - ORDER BY "aggr__0__count" DESC, "aggr__0__key_0" ASC - LIMIT 4`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n `message` AS `aggr__0__key_0`, count(*) AS `aggr__0__count`\n FROM `__quesma_table_name`\n WHERE (`timestamp`>=fromUnixTimestamp64Milli(1708456413795) AND `timestamp`<=\n fromUnixTimestamp64Milli(1708488074920))\n GROUP BY `message` AS `aggr__0__key_0`\n ORDER BY `aggr__0__count` DESC, `aggr__0__key_0` ASC\n LIMIT 4", }, { // [17] TestName: "triple nested aggs", @@ -3267,18 +3140,7 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("metric__0__1-bucket__1-metric_col_0", 931.96875), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp64Milli("order_date") / 43200000) AS - "aggr__0__key_0", count(*) AS "aggr__0__count", - countIf("products.product_name" __quesma_match '%watch%') AS - "aggr__0__1-bucket__count", - sumOrNullIf("taxful_total_price", "products.product_name" __quesma_match '%watch%') AS - "metric__0__1-bucket__1-metric_col_0" - FROM ` + TableName + ` - WHERE ("order_date">=fromUnixTimestamp64Milli(1708627654149) AND "order_date"<=fromUnixTimestamp64Milli(1709232454149)) - GROUP BY toInt64(toUnixTimestamp64Milli("order_date") / 43200000) AS - "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64(toUnixTimestamp64Milli(`order_date`) / 43200000) AS `\n aggr__0__key_0`, count(*) AS 
`aggr__0__count`,\n countIf(`products.product_name` __quesma_match '%watch%') AS `aggr__0__1-\n bucket__count`,\n sumOrNullIf(`taxful_total_price`, `products.product_name` __quesma_match\n '%watch%') AS `metric__0__1-bucket__1-metric_col_0`\n FROM `__quesma_table_name`\n WHERE (`order_date`>=fromUnixTimestamp64Milli(1708627654149) AND `order_date`<=\n fromUnixTimestamp64Milli(1709232454149))\n GROUP BY toInt64(toUnixTimestamp64Milli(`order_date`) / 43200000) AS `\n aggr__0__key_0`\n ORDER BY `aggr__0__key_0` ASC", }, { // [18] TestName: "complex filters", @@ -3494,40 +3356,38 @@ var AggregationTests = []AggregationTestCase{ //model.NewQueryResultCol("filter_1__metric__time_offset_split__0__2_col_0", nil), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__time_offset_split__count", - toInt64(toUnixTimestamp64Milli("order_date") / 86400000) AS - "aggr__time_offset_split__0__key_0", - count(*) AS "aggr__time_offset_split__0__count", - sumOrNull("taxful_total_price") AS "metric__time_offset_split__0__1_col_0", - sumOrNull("taxful_total_price") AS "metric__time_offset_split__0__2_col_0" - FROM __quesma_table_name - WHERE ((("order_date">=fromUnixTimestamp64Milli(1708639056376) AND "order_date" - <=fromUnixTimestamp64Milli(1709243856376)) OR ("order_date">= - fromUnixTimestamp64Milli(1708034256376) AND "order_date"<= - fromUnixTimestamp64Milli(1708639056376))) AND ("order_date">= - fromUnixTimestamp64Milli(1708639056376) AND "order_date"<= - fromUnixTimestamp64Milli(1709243856376))) - GROUP BY toInt64(toUnixTimestamp64Milli("order_date") / 86400000) AS - "aggr__time_offset_split__0__key_0" - ORDER BY "aggr__time_offset_split__0__key_0" ASC`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__time_offset_split__count`,\n" + + " toInt64(toUnixTimestamp64Milli(`order_date`) / 86400000) AS\n" + + " `aggr__time_offset_split__0__key_0`,\n" + + " count(*) AS `aggr__time_offset_split__0__count`,\n" + + " sumOrNull(`taxful_total_price`) AS 
`metric__time_offset_split__0__1_col_0`,\n" + + " sumOrNull(`taxful_total_price`) AS `metric__time_offset_split__0__2_col_0`\n" + + "FROM `" + TableName + "`\n" + + "WHERE (((`order_date`>=fromUnixTimestamp64Milli(1708639056376) AND `order_date`<=fromUnixTimestamp64Milli(1709243856376)) OR (`order_date`>=\n" + + " fromUnixTimestamp64Milli(1708034256376) AND `order_date`<=\n" + + " fromUnixTimestamp64Milli(1708639056376))) AND (`order_date`>=\n" + + " fromUnixTimestamp64Milli(1708639056376) AND `order_date`<=\n" + + " fromUnixTimestamp64Milli(1709243856376)))\n" + + "GROUP BY toInt64(toUnixTimestamp64Milli(`order_date`) / 86400000) AS\n" + + " `aggr__time_offset_split__0__key_0`\n" + + "ORDER BY `aggr__time_offset_split__0__key_0` ASC", ExpectedAdditionalPancakeSQLs: []string{ - `SELECT sum(count(*)) OVER () AS "aggr__time_offset_split__count", - toInt64(toUnixTimestamp64Milli("order_date") / 86400000) AS - "aggr__time_offset_split__0__key_0", - count(*) AS "aggr__time_offset_split__0__count", - sumOrNull("taxful_total_price") AS "metric__time_offset_split__0__1_col_0", - sumOrNull("taxful_total_price") AS "metric__time_offset_split__0__2_col_0" - FROM __quesma_table_name - WHERE ((("order_date">=fromUnixTimestamp64Milli(1708639056376) AND - "order_date"<=fromUnixTimestamp64Milli(1709243856376)) OR - ("order_date">=fromUnixTimestamp64Milli(1708034256376) AND - "order_date"<=fromUnixTimestamp64Milli(1708639056376))) AND - ("order_date">=fromUnixTimestamp64Milli(1708034256376) AND - "order_date"<=fromUnixTimestamp64Milli(1708639056376))) - GROUP BY toInt64(toUnixTimestamp64Milli("order_date") / 86400000) AS - "aggr__time_offset_split__0__key_0" - ORDER BY "aggr__time_offset_split__0__key_0" ASC`, + "SELECT sum(count(*)) OVER () AS `aggr__time_offset_split__count`,\n" + + " toInt64(toUnixTimestamp64Milli(`order_date`) / 86400000) AS\n" + + " `aggr__time_offset_split__0__key_0`,\n" + + " count(*) AS `aggr__time_offset_split__0__count`,\n" + + " 
sumOrNull(`taxful_total_price`) AS `metric__time_offset_split__0__1_col_0`,\n" + + " sumOrNull(`taxful_total_price`) AS `metric__time_offset_split__0__2_col_0`\n" + + "FROM `" + TableName + "`\n" + + "WHERE (((`order_date`>=fromUnixTimestamp64Milli(1708639056376) AND\n" + + " `order_date`<=fromUnixTimestamp64Milli(1709243856376)) OR\n" + + " (`order_date`>=fromUnixTimestamp64Milli(1708034256376) AND\n" + + " `order_date`<=fromUnixTimestamp64Milli(1708639056376))) AND\n" + + " (`order_date`>=fromUnixTimestamp64Milli(1708034256376) AND\n" + + " `order_date`<=fromUnixTimestamp64Milli(1708639056376)))\n" + + "GROUP BY toInt64(toUnixTimestamp64Milli(`order_date`) / 86400000) AS\n" + + " `aggr__time_offset_split__0__key_0`\n" + + "ORDER BY `aggr__time_offset_split__0__key_0` ASC", }, ExpectedAdditionalPancakeResults: [][]model.QueryResultRow{{ {Cols: []model.QueryResultCol{ @@ -3653,19 +3513,7 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("aggr__sampler__eventRate__count", 1), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__sampler__count", - toInt64(toUnixTimestamp64Milli("@timestamp") / 15000) AS - "aggr__sampler__eventRate__key_0", - count(*) AS "aggr__sampler__eventRate__count" - FROM ( - SELECT "@timestamp" - FROM ` + TableName + ` - WHERE ("@timestamp">=fromUnixTimestamp64Milli(1709815794995) AND "@timestamp"<=fromUnixTimestamp64Milli(1709816694995)) - LIMIT 20000) - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 15000) AS - "aggr__sampler__eventRate__key_0" - ORDER BY "aggr__sampler__eventRate__key_0" ASC`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__sampler__count`,\n toInt64(toUnixTimestamp64Milli(`@timestamp`) / 15000) AS `\n aggr__sampler__eventRate__key_0`,\n count(*) AS `aggr__sampler__eventRate__count`\n FROM (\n SELECT `@timestamp`\n FROM `__quesma_table_name`\n WHERE (`@timestamp`>=fromUnixTimestamp64Milli(1709815794995) AND `@timestamp\n 
`<=fromUnixTimestamp64Milli(1709816694995))\n LIMIT 20000)\n GROUP BY toInt64(toUnixTimestamp64Milli(`@timestamp`) / 15000) AS `\n aggr__sampler__eventRate__key_0`\n ORDER BY `aggr__sampler__eventRate__key_0` ASC", }, { // [20] TestName: "Field statistics > summary for numeric fields", @@ -3939,201 +3787,200 @@ var AggregationTests = []AggregationTestCase{ }}, }, }, - ExpectedPancakeSQL: ` - SELECT count(*) AS "aggr__sample__count", - quantiles(0.050000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_col_0", - quantiles(0.100000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_col_1", - quantiles(0.150000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_col_2", - quantiles(0.200000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_col_3", - quantiles(0.250000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_col_4", - quantiles(0.300000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_col_5", - quantiles(0.350000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_col_6", - quantiles(0.400000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_col_7", - quantiles(0.450000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_col_8", - quantiles(0.500000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_col_9", - quantiles(0.550000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_col_10", - quantiles(0.600000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_col_11", - quantiles(0.650000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_col_12", - quantiles(0.700000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_col_13", - quantiles(0.750000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_col_14", - quantiles(0.800000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_col_15", - quantiles(0.850000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_col_16", - 
quantiles(0.900000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_col_17", - quantiles(0.950000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_col_18", - quantiles(0.999999)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_col_19", - quantiles(0.050000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_0", - quantiles(0.100000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_1", - quantiles(0.150000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_2", - quantiles(0.200000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_3", - quantiles(0.250000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_4", - quantiles(0.300000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_5", - quantiles(0.350000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_6", - quantiles(0.400000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_7", - quantiles(0.450000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_8", - quantiles(0.500000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_9", - quantiles(0.550000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_10", - quantiles(0.600000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_11", - quantiles(0.650000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_12", - quantiles(0.700000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_13", - quantiles(0.750000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_14", - quantiles(0.800000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_15", - quantiles(0.850000)("bytes_gauge") AS - 
"metric__sample__bytes_gauge_percentiles_keyed_true_col_16", - quantiles(0.900000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_17", - quantiles(0.950000)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_18", - quantiles(0.999999)("bytes_gauge") AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_19", - countIf("bytes_gauge" IS NOT NULL) AS - "aggr__sample__bytes_gauge_field_stats__count", - countIf("bytes_gauge" IS NOT NULL) AS - "metric__sample__bytes_gauge_field_stats__actual_stats_col_0", - minOrNullIf("bytes_gauge", "bytes_gauge" IS NOT NULL) AS - "metric__sample__bytes_gauge_field_stats__actual_stats_col_1", - maxOrNullIf("bytes_gauge", "bytes_gauge" IS NOT NULL) AS - "metric__sample__bytes_gauge_field_stats__actual_stats_col_2", - avgOrNullIf("bytes_gauge", "bytes_gauge" IS NOT NULL) AS - "metric__sample__bytes_gauge_field_stats__actual_stats_col_3", - sumOrNullIf("bytes_gauge", "bytes_gauge" IS NOT NULL) AS - "metric__sample__bytes_gauge_field_stats__actual_stats_col_4" - FROM ( - SELECT "bytes_gauge" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1709932426749) AND "timestamp"<= - fromUnixTimestamp64Milli(1711228426749)) - LIMIT 20000)`, - ExpectedAdditionalPancakeSQLs: []string{` - SELECT sum(count(*)) OVER () AS "aggr__sample__count", - quantilesMerge(0.050000)(quantilesState(0.050000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_col_0", - quantilesMerge(0.100000)(quantilesState(0.100000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_col_1", - quantilesMerge(0.150000)(quantilesState(0.150000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_col_2", - quantilesMerge(0.200000)(quantilesState(0.200000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_col_3", - quantilesMerge(0.250000)(quantilesState(0.250000)("bytes_gauge")) OVER () AS - 
"metric__sample__bytes_gauge_percentiles_col_4", - quantilesMerge(0.300000)(quantilesState(0.300000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_col_5", - quantilesMerge(0.350000)(quantilesState(0.350000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_col_6", - quantilesMerge(0.400000)(quantilesState(0.400000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_col_7", - quantilesMerge(0.450000)(quantilesState(0.450000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_col_8", - quantilesMerge(0.500000)(quantilesState(0.500000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_col_9", - quantilesMerge(0.550000)(quantilesState(0.550000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_col_10", - quantilesMerge(0.600000)(quantilesState(0.600000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_col_11", - quantilesMerge(0.650000)(quantilesState(0.650000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_col_12", - quantilesMerge(0.700000)(quantilesState(0.700000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_col_13", - quantilesMerge(0.750000)(quantilesState(0.750000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_col_14", - quantilesMerge(0.800000)(quantilesState(0.800000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_col_15", - quantilesMerge(0.850000)(quantilesState(0.850000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_col_16", - quantilesMerge(0.900000)(quantilesState(0.900000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_col_17", - quantilesMerge(0.950000)(quantilesState(0.950000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_col_18", - quantilesMerge(0.999999)(quantilesState(0.999999)("bytes_gauge")) OVER () AS - 
"metric__sample__bytes_gauge_percentiles_col_19", - quantilesMerge(0.050000)(quantilesState(0.050000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_0", - quantilesMerge(0.100000)(quantilesState(0.100000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_1", - quantilesMerge(0.150000)(quantilesState(0.150000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_2", - quantilesMerge(0.200000)(quantilesState(0.200000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_3", - quantilesMerge(0.250000)(quantilesState(0.250000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_4", - quantilesMerge(0.300000)(quantilesState(0.300000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_5", - quantilesMerge(0.350000)(quantilesState(0.350000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_6", - quantilesMerge(0.400000)(quantilesState(0.400000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_7", - quantilesMerge(0.450000)(quantilesState(0.450000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_8", - quantilesMerge(0.500000)(quantilesState(0.500000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_9", - quantilesMerge(0.550000)(quantilesState(0.550000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_10", - quantilesMerge(0.600000)(quantilesState(0.600000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_11", - quantilesMerge(0.650000)(quantilesState(0.650000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_12", - quantilesMerge(0.700000)(quantilesState(0.700000)("bytes_gauge")) OVER () AS - 
"metric__sample__bytes_gauge_percentiles_keyed_true_col_13", - quantilesMerge(0.750000)(quantilesState(0.750000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_14", - quantilesMerge(0.800000)(quantilesState(0.800000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_15", - quantilesMerge(0.850000)(quantilesState(0.850000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_16", - quantilesMerge(0.900000)(quantilesState(0.900000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_17", - quantilesMerge(0.950000)(quantilesState(0.950000)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_18", - quantilesMerge(0.999999)(quantilesState(0.999999)("bytes_gauge")) OVER () AS - "metric__sample__bytes_gauge_percentiles_keyed_true_col_19", - sum(count(*)) OVER () AS "aggr__sample__bytes_gauge_top__parent_count", - "bytes_gauge" AS "aggr__sample__bytes_gauge_top__key_0", - count(*) AS "aggr__sample__bytes_gauge_top__count" - FROM ( - SELECT "bytes_gauge" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1709932426749) AND "timestamp"<= - fromUnixTimestamp64Milli(1711228426749)) - LIMIT 20000) - GROUP BY "bytes_gauge" AS "aggr__sample__bytes_gauge_top__key_0" - ORDER BY "aggr__sample__bytes_gauge_top__count" DESC, - "aggr__sample__bytes_gauge_top__key_0" ASC - LIMIT 11`, + ExpectedPancakeSQL: "SELECT count(*) AS `aggr__sample__count`,\n" + + " quantiles(0.050000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_0`,\n" + + " quantiles(0.100000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_1`,\n" + + " quantiles(0.150000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_2`,\n" + + " quantiles(0.200000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_3`,\n" + + " 
quantiles(0.250000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_4`,\n" + + " quantiles(0.300000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_5`,\n" + + " quantiles(0.350000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_6`,\n" + + " quantiles(0.400000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_7`,\n" + + " quantiles(0.450000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_8`,\n" + + " quantiles(0.500000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_9`,\n" + + " quantiles(0.550000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_10`,\n" + + " quantiles(0.600000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_11`,\n" + + " quantiles(0.650000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_12`,\n" + + " quantiles(0.700000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_13`,\n" + + " quantiles(0.750000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_14`,\n" + + " quantiles(0.800000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_15`,\n" + + " quantiles(0.850000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_16`,\n" + + " quantiles(0.900000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_17`,\n" + + " quantiles(0.950000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_18`,\n" + + " quantiles(0.999999)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_19`,\n" + + " quantiles(0.050000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_0`,\n" + + " quantiles(0.100000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_1`,\n" + + " quantiles(0.150000)(`bytes_gauge`) AS\n" + + " 
`metric__sample__bytes_gauge_percentiles_keyed_true_col_2`,\n" + + " quantiles(0.200000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_3`,\n" + + " quantiles(0.250000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_4`,\n" + + " quantiles(0.300000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_5`,\n" + + " quantiles(0.350000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_6`,\n" + + " quantiles(0.400000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_7`,\n" + + " quantiles(0.450000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_8`,\n" + + " quantiles(0.500000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_9`,\n" + + " quantiles(0.550000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_10`,\n" + + " quantiles(0.600000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_11`,\n" + + " quantiles(0.650000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_12`,\n" + + " quantiles(0.700000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_13`,\n" + + " quantiles(0.750000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_14`,\n" + + " quantiles(0.800000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_15`,\n" + + " quantiles(0.850000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_16`,\n" + + " quantiles(0.900000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_17`,\n" + + " quantiles(0.950000)(`bytes_gauge`) AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_18`,\n" + + " quantiles(0.999999)(`bytes_gauge`) AS\n" + + " 
`metric__sample__bytes_gauge_percentiles_keyed_true_col_19`,\n" + + " countIf(`bytes_gauge` IS NOT NULL) AS\n" + + " `aggr__sample__bytes_gauge_field_stats__count`,\n" + + " countIf(`bytes_gauge` IS NOT NULL) AS\n" + + " `metric__sample__bytes_gauge_field_stats__actual_stats_col_0`,\n" + + " minOrNullIf(`bytes_gauge`, `bytes_gauge` IS NOT NULL) AS\n" + + " `metric__sample__bytes_gauge_field_stats__actual_stats_col_1`,\n" + + " maxOrNullIf(`bytes_gauge`, `bytes_gauge` IS NOT NULL) AS\n" + + " `metric__sample__bytes_gauge_field_stats__actual_stats_col_2`,\n" + + " avgOrNullIf(`bytes_gauge`, `bytes_gauge` IS NOT NULL) AS\n" + + " `metric__sample__bytes_gauge_field_stats__actual_stats_col_3`,\n" + + " sumOrNullIf(`bytes_gauge`, `bytes_gauge` IS NOT NULL) AS\n" + + " `metric__sample__bytes_gauge_field_stats__actual_stats_col_4`\n" + + "FROM (\n" + + " SELECT `bytes_gauge`\n" + + " FROM `" + TableName + "`\n" + + " WHERE (`timestamp`>=fromUnixTimestamp64Milli(1709932426749) AND `timestamp`<=\n" + + " fromUnixTimestamp64Milli(1711228426749))\n" + + " LIMIT 20000)", + ExpectedAdditionalPancakeSQLs: []string{ + "SELECT sum(count(*)) OVER () AS `aggr__sample__count`,\n" + + " quantilesMerge(0.050000)(quantilesState(0.050000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_0`,\n" + + " quantilesMerge(0.100000)(quantilesState(0.100000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_1`,\n" + + " quantilesMerge(0.150000)(quantilesState(0.150000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_2`,\n" + + " quantilesMerge(0.200000)(quantilesState(0.200000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_3`,\n" + + " quantilesMerge(0.250000)(quantilesState(0.250000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_4`,\n" + + " quantilesMerge(0.300000)(quantilesState(0.300000)(`bytes_gauge`)) OVER () AS\n" + + " 
`metric__sample__bytes_gauge_percentiles_col_5`,\n" + + " quantilesMerge(0.350000)(quantilesState(0.350000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_6`,\n" + + " quantilesMerge(0.400000)(quantilesState(0.400000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_7`,\n" + + " quantilesMerge(0.450000)(quantilesState(0.450000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_8`,\n" + + " quantilesMerge(0.500000)(quantilesState(0.500000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_9`,\n" + + " quantilesMerge(0.550000)(quantilesState(0.550000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_10`,\n" + + " quantilesMerge(0.600000)(quantilesState(0.600000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_11`,\n" + + " quantilesMerge(0.650000)(quantilesState(0.650000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_12`,\n" + + " quantilesMerge(0.700000)(quantilesState(0.700000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_13`,\n" + + " quantilesMerge(0.750000)(quantilesState(0.750000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_14`,\n" + + " quantilesMerge(0.800000)(quantilesState(0.800000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_15`,\n" + + " quantilesMerge(0.850000)(quantilesState(0.850000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_16`,\n" + + " quantilesMerge(0.900000)(quantilesState(0.900000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_17`,\n" + + " quantilesMerge(0.950000)(quantilesState(0.950000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_18`,\n" + + " 
quantilesMerge(0.999999)(quantilesState(0.999999)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_col_19`,\n" + + " quantilesMerge(0.050000)(quantilesState(0.050000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_0`,\n" + + " quantilesMerge(0.100000)(quantilesState(0.100000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_1`,\n" + + " quantilesMerge(0.150000)(quantilesState(0.150000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_2`,\n" + + " quantilesMerge(0.200000)(quantilesState(0.200000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_3`,\n" + + " quantilesMerge(0.250000)(quantilesState(0.250000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_4`,\n" + + " quantilesMerge(0.300000)(quantilesState(0.300000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_5`,\n" + + " quantilesMerge(0.350000)(quantilesState(0.350000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_6`,\n" + + " quantilesMerge(0.400000)(quantilesState(0.400000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_7`,\n" + + " quantilesMerge(0.450000)(quantilesState(0.450000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_8`,\n" + + " quantilesMerge(0.500000)(quantilesState(0.500000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_9`,\n" + + " quantilesMerge(0.550000)(quantilesState(0.550000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_10`,\n" + + " quantilesMerge(0.600000)(quantilesState(0.600000)(`bytes_gauge`)) OVER () AS\n" + + " 
`metric__sample__bytes_gauge_percentiles_keyed_true_col_11`,\n" + + " quantilesMerge(0.650000)(quantilesState(0.650000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_12`,\n" + + " quantilesMerge(0.700000)(quantilesState(0.700000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_13`,\n" + + " quantilesMerge(0.750000)(quantilesState(0.750000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_14`,\n" + + " quantilesMerge(0.800000)(quantilesState(0.800000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_15`,\n" + + " quantilesMerge(0.850000)(quantilesState(0.850000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_16`,\n" + + " quantilesMerge(0.900000)(quantilesState(0.900000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_17`,\n" + + " quantilesMerge(0.950000)(quantilesState(0.950000)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_18`,\n" + + " quantilesMerge(0.999999)(quantilesState(0.999999)(`bytes_gauge`)) OVER () AS\n" + + " `metric__sample__bytes_gauge_percentiles_keyed_true_col_19`,\n" + + " sum(count(*)) OVER () AS `aggr__sample__bytes_gauge_top__parent_count`,\n" + + " `bytes_gauge` AS `aggr__sample__bytes_gauge_top__key_0`,\n" + + " count(*) AS `aggr__sample__bytes_gauge_top__count`\n" + + "FROM (\n" + + " SELECT `bytes_gauge`\n" + + " FROM `" + TableName + "`\n" + + " WHERE (`timestamp`>=fromUnixTimestamp64Milli(1709932426749) AND `timestamp`<=\n" + + " fromUnixTimestamp64Milli(1711228426749))\n" + + " LIMIT 20000)\n" + + "GROUP BY `bytes_gauge` AS `aggr__sample__bytes_gauge_top__key_0`\n" + + "ORDER BY `aggr__sample__bytes_gauge_top__count` DESC,\n" + + " `aggr__sample__bytes_gauge_top__key_0` ASC\n" + + " LIMIT 11", }, }, { // [21] @@ -4331,28 +4178,27 
@@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("range_4__aggr__3__count", uint64(10)), }}}, }, - ExpectedPancakeSQL: ` - SELECT countIf(("bytes_gauge">=0 AND "bytes_gauge"<1000)) AS - "range_0__aggr__2__count", - countIf(("bytes_gauge">=1000 AND "bytes_gauge"<2000)) AS - "range_1__aggr__2__count", - countIf("bytes_gauge">=-5.5) AS "range_2__aggr__2__count", - countIf("bytes_gauge"<6.555) AS "range_3__aggr__2__count", - countIf("bytes_gauge" IS NOT NULL) AS "range_4__aggr__2__count" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1713269711790) AND "timestamp"<= - fromUnixTimestamp64Milli(1713270611790))`, - ExpectedAdditionalPancakeSQLs: []string{` - SELECT countIf(("bytes_gauge">=0 AND "bytes_gauge"<1000)) AS - "range_0__aggr__3__count", - countIf(("bytes_gauge">=1000 AND "bytes_gauge"<2000)) AS - "range_1__aggr__3__count", - countIf("bytes_gauge">=-5.5) AS "range_2__aggr__3__count", - countIf("bytes_gauge"<6.555) AS "range_3__aggr__3__count", - countIf("bytes_gauge" IS NOT NULL) AS "range_4__aggr__3__count" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1713269711790) AND "timestamp"<= - fromUnixTimestamp64Milli(1713270611790))`, + ExpectedPancakeSQL: "SELECT countIf((`bytes_gauge`>=0 AND `bytes_gauge`<1000)) AS\n" + + " `range_0__aggr__2__count`,\n" + + " countIf((`bytes_gauge`>=1000 AND `bytes_gauge`<2000)) AS\n" + + " `range_1__aggr__2__count`,\n" + + " countIf(`bytes_gauge`>=-5.5) AS `range_2__aggr__2__count`,\n" + + " countIf(`bytes_gauge`<6.555) AS `range_3__aggr__2__count`,\n" + + " countIf(`bytes_gauge` IS NOT NULL) AS `range_4__aggr__2__count`\n" + + "FROM `" + TableName + "`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1713269711790) AND `timestamp`<=\n" + + " fromUnixTimestamp64Milli(1713270611790))", + ExpectedAdditionalPancakeSQLs: []string{ + "SELECT countIf((`bytes_gauge`>=0 AND `bytes_gauge`<1000)) AS\n" + + " `range_0__aggr__3__count`,\n" + + " 
countIf((`bytes_gauge`>=1000 AND `bytes_gauge`<2000)) AS\n" + + " `range_1__aggr__3__count`,\n" + + " countIf(`bytes_gauge`>=-5.5) AS `range_2__aggr__3__count`,\n" + + " countIf(`bytes_gauge`<6.555) AS `range_3__aggr__3__count`,\n" + + " countIf(`bytes_gauge` IS NOT NULL) AS `range_4__aggr__3__count`\n" + + "FROM `" + TableName + "`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1713269711790) AND `timestamp`<=\n" + + " fromUnixTimestamp64Milli(1713270611790))", }, }, { // [22] @@ -4483,16 +4329,15 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("range_2__aggr__2__count", int(414)), }}, }, - ExpectedPancakeSQL: ` - SELECT countIf("timestamp"=toInt64(toUnixTimestamp(toStartOfDay(subDate(now(), - INTERVAL 3 week)))) AND "timestamp"=toInt64(toUnixTimestamp('2024-04-14'))) AS - "range_2__aggr__2__count" - FROM ` + TableName + ` - WHERE ("timestamp">=fromUnixTimestamp64Milli(1712388530059) AND "timestamp"<=fromUnixTimestamp64Milli(1713288530059))`, + ExpectedPancakeSQL: "SELECT countIf(`timestamp`=toInt64(toUnixTimestamp(toStartOfDay(subDate(now(),\n" + + " INTERVAL 3 week)))) AND `timestamp`=toInt64(toUnixTimestamp('2024-04-14'))) AS\n" + + " `range_2__aggr__2__count`\n" + + "FROM `" + TableName + "` \n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1712388530059) AND `timestamp`<=fromUnixTimestamp64Milli(1713288530059))", }, { // [23] TestName: "significant terms aggregation: same as terms for now", @@ -4596,14 +4441,7 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", uint64(206)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__2__parent_count", - "message" AS "aggr__2__key_0", - count(*) AS "aggr__2__count" - FROM ` + TableName + ` - GROUP BY "message" AS "aggr__2__key_0" - ORDER BY "aggr__2__count" DESC, "aggr__2__key_0" ASC - LIMIT 5`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__2__parent_count`,\n `message` AS `aggr__2__key_0`, count(*) AS 
`aggr__2__count`\n FROM `__quesma_table_name`\n GROUP BY `message` AS `aggr__2__key_0`\n ORDER BY `aggr__2__count` DESC, `aggr__2__key_0` ASC\n LIMIT 5", }, { // [24] TestName: "meta field in aggregation", @@ -4685,15 +4523,7 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("metric__timeseries__61ca57f2-469d-11e7-af02-69e470af7417_col_0", 21), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 79200000) AS - "aggr__timeseries__key_0", count(*) AS "aggr__timeseries__count", - uniq("host.name") AS - "metric__timeseries__61ca57f2-469d-11e7-af02-69e470af7417_col_0" - FROM ` + TableName + ` - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 79200000) AS - "aggr__timeseries__key_0" - ORDER BY "aggr__timeseries__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64(toUnixTimestamp64Milli(`@timestamp`) / 79200000) AS `\n aggr__timeseries__key_0`, count(*) AS `aggr__timeseries__count`,\n uniq(`host.name`) AS `metric__timeseries__61ca57f2-469d-11e7-af02-69e470\n af7417_col_0`\n FROM `__quesma_table_name`\n GROUP BY toInt64(toUnixTimestamp64Milli(`@timestamp`) / 79200000) AS `\n aggr__timeseries__key_0`\n ORDER BY `aggr__timeseries__key_0` ASC", }, { // [25] TestName: "simple histogram, but min_doc_count: 0", @@ -4824,13 +4654,12 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", 2), }}, }, - ExpectedPancakeSQL: ` - SELECT floor("bytes"/100)*100 AS "aggr__2__key_0", - count(*) AS "aggr__2__count" - FROM ` + TableName + ` - WHERE ("timestamp">=fromUnixTimestamp64Milli(1715348876077) AND "timestamp"<=fromUnixTimestamp64Milli(1715349776077)) - GROUP BY floor("bytes"/100)*100 AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT floor(`bytes`/100)*100 AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`\n" + + "FROM `" + TableName + "`\n" + + " WHERE (`timestamp`>=fromUnixTimestamp64Milli(1715348876077) AND 
`timestamp`<=fromUnixTimestamp64Milli(1715349776077))\n" + + "GROUP BY floor(`bytes`/100)*100 AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [26] TestName: "simple date_histogram, but min_doc_count: 0", @@ -4962,15 +4791,14 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", 1), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp64Milli("timestamp") / 30000) AS "aggr__2__key_0", - count(*) AS "aggr__2__count" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1715351342900) AND "timestamp"<= - fromUnixTimestamp64Milli(1715352242900)) - GROUP BY toInt64(toUnixTimestamp64Milli("timestamp") / 30000) AS - "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64(toUnixTimestamp64Milli(`timestamp`) / 30000) AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`\n" + + "FROM `" + TableName + "`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1715351342900) AND `timestamp`<=\n" + + " fromUnixTimestamp64Milli(1715352242900))\n" + + "GROUP BY toInt64(toUnixTimestamp64Milli(`timestamp`) / 30000) AS\n" + + " `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [27] TestName: "simple date_histogram, but min_doc_count: 0", @@ -5124,28 +4952,27 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("aggr__0__2__count", int64(1)), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__0__key_0", "aggr__0__count", "aggr__0__2__parent_count", - "aggr__0__2__key_0", "aggr__0__2__count" - FROM ( - SELECT "aggr__0__key_0", "aggr__0__count", "aggr__0__2__parent_count", - "aggr__0__2__key_0", "aggr__0__2__count", - dense_rank() OVER (ORDER BY "aggr__0__key_0" ASC) AS "aggr__0__order_1_rank" - , - dense_rank() OVER (PARTITION BY "aggr__0__key_0" ORDER BY - "aggr__0__2__count" DESC, "aggr__0__2__key_0" ASC) AS - "aggr__0__2__order_1_rank" - FROM ( - SELECT floor("rspContentLen"/2000)*2000 AS "aggr__0__key_0", - sum(count(*)) OVER 
(PARTITION BY "aggr__0__key_0") AS "aggr__0__count", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS - "aggr__0__2__parent_count", "message" AS "aggr__0__2__key_0", - count(*) AS "aggr__0__2__count" - FROM ` + TableName + ` - GROUP BY floor("rspContentLen"/2000)*2000 AS "aggr__0__key_0", - "message" AS "aggr__0__2__key_0")) - WHERE "aggr__0__2__order_1_rank"<=5 - ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__2__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__0__key_0`, `aggr__0__count`, `aggr__0__2__parent_count`,\n" + + " `aggr__0__2__key_0`, `aggr__0__2__count`\n" + + "FROM (\n" + + " SELECT `aggr__0__key_0`, `aggr__0__count`, `aggr__0__2__parent_count`,\n" + + " `aggr__0__2__key_0`, `aggr__0__2__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__0__key_0` ASC) AS `aggr__0__order_1_rank`\n" + + " ,\n" + + " dense_rank() OVER (PARTITION BY `aggr__0__key_0` ORDER BY\n" + + " `aggr__0__2__count` DESC, `aggr__0__2__key_0` ASC) AS\n" + + " `aggr__0__2__order_1_rank`\n" + + " FROM (\n" + + " SELECT floor(`rspContentLen`/2000)*2000 AS `aggr__0__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS `aggr__0__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS\n" + + " `aggr__0__2__parent_count`, `message` AS `aggr__0__2__key_0`,\n" + + " count(*) AS `aggr__0__2__count`\n" + + " FROM `" + TableName + "`\n" + + " GROUP BY floor(`rspContentLen`/2000)*2000 AS `aggr__0__key_0`,\n" + + " `message` AS `aggr__0__2__key_0`))\n" + + "WHERE `aggr__0__2__order_1_rank`<=5\n" + + "ORDER BY `aggr__0__order_1_rank` ASC, `aggr__0__2__order_1_rank` ASC", }, { // [28] TestName: "Terms, completely different tree results from 2 queries - merging them didn't work before", @@ -5333,15 +5160,14 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("metric__0__3-bucket_col_0", uint64(0)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "OriginCityName" AS "aggr__0__key_0", 
count(*) AS "aggr__0__count", - countIf("FlightDelay" __quesma_match true) AS "metric__0__1-bucket_col_0", - countIf("Cancelled" __quesma_match true) AS "metric__0__3-bucket_col_0" - FROM ` + TableName + ` - GROUP BY "OriginCityName" AS "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC - LIMIT 1001`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n" + + " `OriginCityName` AS `aggr__0__key_0`, count(*) AS `aggr__0__count`,\n" + + " countIf(`FlightDelay` __quesma_match true) AS `metric__0__1-bucket_col_0`,\n" + + " countIf(`Cancelled` __quesma_match true) AS `metric__0__3-bucket_col_0`\n" + + "FROM `" + TableName + "`\n" + + "GROUP BY `OriginCityName` AS `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__key_0` ASC\n" + + "LIMIT 1001", }, { // [29] TestName: "Terms, completely different tree results from 2 queries - merging them didn't work before (logs) TODO add results", @@ -5509,33 +5335,32 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("metric__3__2__1_col_0", 2), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__3__parent_count", "aggr__3__key_0", "aggr__3__count", - "metric__3__1_col_0", "aggr__3__2__parent_count", "aggr__3__2__key_0", - "aggr__3__2__count", "metric__3__2__1_col_0" - FROM ( - SELECT "aggr__3__parent_count", "aggr__3__key_0", "aggr__3__count", - "metric__3__1_col_0", "aggr__3__2__parent_count", "aggr__3__2__key_0", - "aggr__3__2__count", "metric__3__2__1_col_0", - dense_rank() OVER (ORDER BY "metric__3__1_col_0" DESC, "aggr__3__key_0" ASC) - AS "aggr__3__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__3__key_0" ORDER BY - "metric__3__2__1_col_0" DESC, "aggr__3__2__key_0" ASC) AS - "aggr__3__2__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__3__parent_count", - "geo.src" AS "aggr__3__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__3__key_0") AS "aggr__3__count", - sumOrNull(sumOrNull("memory")) OVER (PARTITION BY "aggr__3__key_0") AS - "metric__3__1_col_0", - sum(count(*)) 
OVER (PARTITION BY "aggr__3__key_0") AS - "aggr__3__2__parent_count", "machine.os" AS "aggr__3__2__key_0", - count(*) AS "aggr__3__2__count", - sumOrNull("memory") AS "metric__3__2__1_col_0" - FROM __quesma_table_name - GROUP BY "geo.src" AS "aggr__3__key_0", "machine.os" AS "aggr__3__2__key_0")) - WHERE ("aggr__3__order_1_rank"<=6 AND "aggr__3__2__order_1_rank"<=6) - ORDER BY "aggr__3__order_1_rank" ASC, "aggr__3__2__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__3__parent_count`, `aggr__3__key_0`, `aggr__3__count`,\n" + + " `metric__3__1_col_0`, `aggr__3__2__parent_count`, `aggr__3__2__key_0`,\n" + + " `aggr__3__2__count`, `metric__3__2__1_col_0`\n" + + "FROM (\n" + + " SELECT `aggr__3__parent_count`, `aggr__3__key_0`, `aggr__3__count`,\n" + + " `metric__3__1_col_0`, `aggr__3__2__parent_count`, `aggr__3__2__key_0`,\n" + + " `aggr__3__2__count`, `metric__3__2__1_col_0`,\n" + + " dense_rank() OVER (ORDER BY `metric__3__1_col_0` DESC, `aggr__3__key_0` ASC)\n" + + " AS `aggr__3__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__3__key_0` ORDER BY\n" + + " `metric__3__2__1_col_0` DESC, `aggr__3__2__key_0` ASC) AS\n" + + " `aggr__3__2__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__3__parent_count`,\n" + + " `geo.src` AS `aggr__3__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__3__key_0`) AS `aggr__3__count`,\n" + + " sumOrNull(sumOrNull(`memory`)) OVER (PARTITION BY `aggr__3__key_0`) AS\n" + + " `metric__3__1_col_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__3__key_0`) AS\n" + + " `aggr__3__2__parent_count`, `machine.os` AS `aggr__3__2__key_0`,\n" + + " count(*) AS `aggr__3__2__count`,\n" + + " sumOrNull(`memory`) AS `metric__3__2__1_col_0`\n" + + " FROM `" + TableName + "`\n" + + " GROUP BY `geo.src` AS `aggr__3__key_0`, `machine.os` AS `aggr__3__2__key_0`))\n" + + "WHERE (`aggr__3__order_1_rank`<=6 AND `aggr__3__2__order_1_rank`<=6)\n" + + "ORDER BY `aggr__3__order_1_rank` ASC, 
`aggr__3__2__order_1_rank` ASC", }, { // [30] TestName: "Terms, completely different tree results from 2 queries - merging them didn't work before (logs). what when cardinality = 0?", @@ -5685,15 +5510,14 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("metric__2__1_col_0", int64(0)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__2__parent_count", - "machine.os" AS "aggr__2__key_0", count(*) AS "aggr__2__count", - uniq("clientip") AS "metric__2__1_col_0" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1715322159037) AND "timestamp"<=fromUnixTimestamp64Milli(1715376159037)) - GROUP BY "machine.os" AS "aggr__2__key_0" - ORDER BY "metric__2__1_col_0" DESC, "aggr__2__key_0" ASC - LIMIT 6`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__2__parent_count`,\n" + + " `machine.os` AS `aggr__2__key_0`, count(*) AS `aggr__2__count`,\n" + + " uniq(`clientip`) AS `metric__2__1_col_0`\n" + + "FROM `" + TableName + "`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1715322159037) AND `timestamp`<=fromUnixTimestamp64Milli(1715376159037))\n" + + "GROUP BY `machine.os` AS `aggr__2__key_0`\n" + + "ORDER BY `metric__2__1_col_0` DESC, `aggr__2__key_0` ASC\n" + + "LIMIT 6", }, // terms + histogram // histogram + terms @@ -5884,41 +5708,41 @@ var AggregationTests = []AggregationTestCase{ }}, }, }, - ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 86400000) AS - "aggr__0__key_0", count(*) AS "aggr__0__count", - countIf("message" IS NOT NULL) AS "aggr__0__1-bucket__count" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 86400000) AS - "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, - ExpectedAdditionalPancakeSQLs: []string{` - WITH quesma_top_hits_group_table AS ( - SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 86400000) AS - "aggr__0__key_0", count(*) AS "aggr__0__count", - countIf("message" IS NOT NULL) AS 
"aggr__0__1-bucket__count" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 86400000) AS - "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC) , - quesma_top_hits_join AS ( - SELECT "group_table"."aggr__0__key_0" AS "aggr__0__key_0", - "group_table"."aggr__0__count" AS "aggr__0__count", - "group_table"."aggr__0__1-bucket__count" AS "aggr__0__1-bucket__count", - "hit_table"."message" AS "top_metrics__0__1-bucket__1-metric_col_0", - "hit_table"."order_date" AS "top_metrics__0__1-bucket__1-metric_col_1", - ROW_NUMBER() OVER (PARTITION BY "group_table"."aggr__0__key_0" ORDER BY - "order_date" DESC) AS "top_hits_rank" - FROM quesma_top_hits_group_table AS "group_table" LEFT OUTER JOIN - __quesma_table_name AS "hit_table" ON ("group_table"."aggr__0__key_0"= - toInt64(toUnixTimestamp64Milli("@timestamp") / 86400000)) - WHERE "message" IS NOT NULL) - SELECT "aggr__0__key_0", "aggr__0__count", "aggr__0__1-bucket__count", - "top_metrics__0__1-bucket__1-metric_col_0", - "top_metrics__0__1-bucket__1-metric_col_1", "top_hits_rank" - FROM "quesma_top_hits_join" - WHERE "top_hits_rank"<=1 - ORDER BY "aggr__0__key_0" ASC, "top_hits_rank" ASC`}, + ExpectedPancakeSQL: "SELECT toInt64(toUnixTimestamp64Milli(`@timestamp`) / 86400000) AS\n" + + " `aggr__0__key_0`, count(*) AS `aggr__0__count`,\n" + + " countIf(`message` IS NOT NULL) AS `aggr__0__1-bucket__count`\n" + + "FROM `" + TableName + "`\n" + + "GROUP BY toInt64(toUnixTimestamp64Milli(`@timestamp`) / 86400000) AS\n" + + " `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__key_0` ASC", + ExpectedAdditionalPancakeSQLs: []string{ + "WITH quesma_top_hits_group_table AS (\n" + + " SELECT toInt64(toUnixTimestamp64Milli(`@timestamp`) / 86400000) AS\n" + + " `aggr__0__key_0`, count(*) AS `aggr__0__count`,\n" + + " countIf(`message` IS NOT NULL) AS `aggr__0__1-bucket__count`\n" + + " FROM `" + TableName + "`\n" + + " GROUP BY toInt64(toUnixTimestamp64Milli(`@timestamp`) / 86400000) AS\n" + + " 
`aggr__0__key_0`\n" + + " ORDER BY `aggr__0__key_0` ASC) ,\n" + + "quesma_top_hits_join AS (\n" + + " SELECT `group_table`.`aggr__0__key_0` AS `aggr__0__key_0`,\n" + + " `group_table`.`aggr__0__count` AS `aggr__0__count`,\n" + + " `group_table`.`aggr__0__1-bucket__count` AS `aggr__0__1-bucket__count`,\n" + + " `hit_table`.`message` AS `top_metrics__0__1-bucket__1-metric_col_0`,\n" + + " `hit_table`.`order_date` AS `top_metrics__0__1-bucket__1-metric_col_1`,\n" + + " ROW_NUMBER() OVER (PARTITION BY `group_table`.`aggr__0__key_0` ORDER BY\n" + + " `order_date` DESC) AS `top_hits_rank`\n" + + " FROM quesma_top_hits_group_table AS `group_table` LEFT OUTER JOIN\n" + + " `" + TableName + "` AS `hit_table` ON (`group_table`.`aggr__0__key_0`=\n" + + " toInt64(toUnixTimestamp64Milli(`@timestamp`) / 86400000))\n" + + " WHERE `message` IS NOT NULL)\n" + + "SELECT `aggr__0__key_0`, `aggr__0__count`, `aggr__0__1-bucket__count`,\n" + + " `top_metrics__0__1-bucket__1-metric_col_0`,\n" + + " `top_metrics__0__1-bucket__1-metric_col_1`, `top_hits_rank`\n" + + "FROM `quesma_top_hits_join`\n" + + "WHERE `top_hits_rank`<=1\n" + + "ORDER BY `aggr__0__key_0` ASC, `top_hits_rank` ASC", + }, }, { // [32] TestName: "Standard deviation", @@ -6178,35 +6002,34 @@ var AggregationTests = []AggregationTestCase{ model.NewQueryResultCol("metric__0__2_col_9", 1689.9999101859655), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 600000) AS "aggr__0__key_0", - count(*) AS "aggr__0__count", count("bytes") AS "metric__0__1_col_0", - minOrNull("bytes") AS "metric__0__1_col_1", - maxOrNull("bytes") AS "metric__0__1_col_2", - avgOrNull("bytes") AS "metric__0__1_col_3", - sumOrNull("bytes") AS "metric__0__1_col_4", - sumOrNull("bytes"*"bytes") AS "metric__0__1_col_5", - varPop("bytes") AS "metric__0__1_col_6", - varSamp("bytes") AS "metric__0__1_col_7", - stddevPop("bytes") AS "metric__0__1_col_8", - 
stddevSamp("bytes") AS "metric__0__1_col_9", - count("bytes") AS "metric__0__2_col_0", - minOrNull("bytes") AS "metric__0__2_col_1", - maxOrNull("bytes") AS "metric__0__2_col_2", - avgOrNull("bytes") AS "metric__0__2_col_3", - sumOrNull("bytes") AS "metric__0__2_col_4", - sumOrNull("bytes"*"bytes") AS "metric__0__2_col_5", - varPop("bytes") AS "metric__0__2_col_6", - varSamp("bytes") AS "metric__0__2_col_7", - stddevPop("bytes") AS "metric__0__2_col_8", - stddevSamp("bytes") AS "metric__0__2_col_9" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1716327334210) AND "timestamp"<= - fromUnixTimestamp64Milli(1716381334210)) - GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 600000) AS "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 600000) AS `aggr__0__key_0`,\n" + + " count(*) AS `aggr__0__count`, count(`bytes`) AS `metric__0__1_col_0`,\n" + + " minOrNull(`bytes`) AS `metric__0__1_col_1`,\n" + + " maxOrNull(`bytes`) AS `metric__0__1_col_2`,\n" + + " avgOrNull(`bytes`) AS `metric__0__1_col_3`,\n" + + " sumOrNull(`bytes`) AS `metric__0__1_col_4`,\n" + + " sumOrNull(`bytes`*`bytes`) AS `metric__0__1_col_5`,\n" + + " varPop(`bytes`) AS `metric__0__1_col_6`,\n" + + " varSamp(`bytes`) AS `metric__0__1_col_7`,\n" + + " stddevPop(`bytes`) AS `metric__0__1_col_8`,\n" + + " stddevSamp(`bytes`) AS `metric__0__1_col_9`,\n" + + " count(`bytes`) AS `metric__0__2_col_0`,\n" + + " minOrNull(`bytes`) AS `metric__0__2_col_1`,\n" + + " maxOrNull(`bytes`) AS `metric__0__2_col_2`,\n" + + " avgOrNull(`bytes`) AS `metric__0__2_col_3`,\n" + + " sumOrNull(`bytes`) AS `metric__0__2_col_4`,\n" + + " sumOrNull(`bytes`*`bytes`) AS `metric__0__2_col_5`,\n" + + " varPop(`bytes`) AS `metric__0__2_col_6`,\n" + + " varSamp(`bytes`) AS 
`metric__0__2_col_7`,\n" + + " stddevPop(`bytes`) AS `metric__0__2_col_8`,\n" + + " stddevSamp(`bytes`) AS `metric__0__2_col_9`\n" + + "FROM `" + TableName + "`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1716327334210) AND `timestamp`<=\n" + + " fromUnixTimestamp64Milli(1716381334210))\n" + + "GROUP BY toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 600000) AS `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__key_0` ASC", }, { // [33] TestName: "0 result rows in 2x terms", @@ -6279,29 +6102,28 @@ var AggregationTests = []AggregationTestCase{ }`, ExpectedResponse: `{"response": {"aggregations":{"0": {"buckets": []}}}}`, ExpectedPancakeResults: make([]model.QueryResultRow, 0), - ExpectedPancakeSQL: ` - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__count", - "aggr__0__1__parent_count", "aggr__0__1__key_0", "aggr__0__1__count" - FROM ( - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__count", - "aggr__0__1__parent_count", "aggr__0__1__key_0", "aggr__0__1__count", - dense_rank() OVER (ORDER BY "aggr__0__count" DESC, "aggr__0__key_0" ASC) AS - "aggr__0__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__0__key_0" ORDER BY - "aggr__0__1__count" DESC, "aggr__0__1__key_0" ASC) AS - "aggr__0__1__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "host.name" AS "aggr__0__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS "aggr__0__count", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS - "aggr__0__1__parent_count", "message" AS "aggr__0__1__key_0", - count(*) AS "aggr__0__1__count" - FROM __quesma_table_name - WHERE ("message" IS NOT NULL AND NOT ("message" __quesma_match '%US%')) - GROUP BY "host.name" AS "aggr__0__key_0", "message" AS "aggr__0__1__key_0")) - WHERE ("aggr__0__order_1_rank"<=11 AND "aggr__0__1__order_1_rank"<=4) - ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__1__order_1_rank" ASC`, + 
ExpectedPancakeSQL: "SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__count`,\n" + + " `aggr__0__1__parent_count`, `aggr__0__1__key_0`, `aggr__0__1__count`\n" + + "FROM (\n" + + " SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__count`,\n" + + " `aggr__0__1__parent_count`, `aggr__0__1__key_0`, `aggr__0__1__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__0__count` DESC, `aggr__0__key_0` ASC) AS\n" + + " `aggr__0__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__0__key_0` ORDER BY\n" + + " `aggr__0__1__count` DESC, `aggr__0__1__key_0` ASC) AS\n" + + " `aggr__0__1__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n" + + " `host.name` AS `aggr__0__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS `aggr__0__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS\n" + + " `aggr__0__1__parent_count`, `message` AS `aggr__0__1__key_0`,\n" + + " count(*) AS `aggr__0__1__count`\n" + + " FROM `" + TableName + "`\n" + + " WHERE (`message` IS NOT NULL AND NOT (`message` __quesma_match '%US%'))\n" + + " GROUP BY `host.name` AS `aggr__0__key_0`, `message` AS `aggr__0__1__key_0`))\n" + + "WHERE (`aggr__0__order_1_rank`<=11 AND `aggr__0__1__order_1_rank`<=4)\n" + + "ORDER BY `aggr__0__order_1_rank` ASC, `aggr__0__1__order_1_rank` ASC", }, { // [34] TestName: "0 result rows in 3x terms", @@ -6386,41 +6208,40 @@ var AggregationTests = []AggregationTestCase{ }`, ExpectedResponse: `{"response": {"aggregations":{"0": {"buckets": []}}}}`, ExpectedPancakeResults: make([]model.QueryResultRow, 0), - ExpectedPancakeSQL: ` - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__count", - "aggr__0__1__parent_count", "aggr__0__1__key_0", "aggr__0__1__count", - "aggr__0__1__2__parent_count", "aggr__0__1__2__key_0", "aggr__0__1__2__count" - FROM ( - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__count", - "aggr__0__1__parent_count", "aggr__0__1__key_0", 
"aggr__0__1__count", - "aggr__0__1__2__parent_count", "aggr__0__1__2__key_0", - "aggr__0__1__2__count", - dense_rank() OVER (ORDER BY "aggr__0__count" DESC, "aggr__0__key_0" ASC) AS - "aggr__0__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__0__key_0" ORDER BY - "aggr__0__1__count" DESC, "aggr__0__1__key_0" ASC) AS - "aggr__0__1__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__0__key_0", "aggr__0__1__key_0" ORDER - BY "aggr__0__1__2__count" DESC, "aggr__0__1__2__key_0" ASC) AS "aggr__0__1__2__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "host.name" AS "aggr__0__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS "aggr__0__count", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS - "aggr__0__1__parent_count", "message" AS "aggr__0__1__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0", "aggr__0__1__key_0") AS - "aggr__0__1__count", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0", "aggr__0__1__key_0") AS - "aggr__0__1__2__parent_count", "message" AS "aggr__0__1__2__key_0", - count(*) AS "aggr__0__1__2__count" - FROM __quesma_table_name - WHERE ("message" IS NOT NULL AND NOT ("message" __quesma_match '%US%')) - GROUP BY "host.name" AS "aggr__0__key_0", "message" AS "aggr__0__1__key_0", - "message" AS "aggr__0__1__2__key_0")) - WHERE (("aggr__0__order_1_rank"<=11 AND "aggr__0__1__order_1_rank"<=4) AND - "aggr__0__1__2__order_1_rank"<=4) - ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__1__order_1_rank" ASC, - "aggr__0__1__2__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__count`,\n" + + " `aggr__0__1__parent_count`, `aggr__0__1__key_0`, `aggr__0__1__count`,\n" + + " `aggr__0__1__2__parent_count`, `aggr__0__1__2__key_0`, `aggr__0__1__2__count`\n" + + "FROM (\n" + + " SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__count`,\n" + + " `aggr__0__1__parent_count`, `aggr__0__1__key_0`, `aggr__0__1__count`,\n" + + " 
`aggr__0__1__2__parent_count`, `aggr__0__1__2__key_0`,\n" + + " `aggr__0__1__2__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__0__count` DESC, `aggr__0__key_0` ASC) AS\n" + + " `aggr__0__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__0__key_0` ORDER BY\n" + + " `aggr__0__1__count` DESC, `aggr__0__1__key_0` ASC) AS\n" + + " `aggr__0__1__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__0__key_0`, `aggr__0__1__key_0` ORDER\n" + + " BY `aggr__0__1__2__count` DESC, `aggr__0__1__2__key_0` ASC) AS `aggr__0__1__2__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n" + + " `host.name` AS `aggr__0__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS `aggr__0__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS\n" + + " `aggr__0__1__parent_count`, `message` AS `aggr__0__1__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`, `aggr__0__1__key_0`) AS\n" + + " `aggr__0__1__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`, `aggr__0__1__key_0`) AS\n" + + " `aggr__0__1__2__parent_count`, `message` AS `aggr__0__1__2__key_0`,\n" + + " count(*) AS `aggr__0__1__2__count`\n" + + " FROM `" + TableName + "`\n" + + " WHERE (`message` IS NOT NULL AND NOT (`message` __quesma_match '%US%'))\n" + + " GROUP BY `host.name` AS `aggr__0__key_0`, `message` AS `aggr__0__1__key_0`,\n" + + " `message` AS `aggr__0__1__2__key_0`))\n" + + "WHERE ((`aggr__0__order_1_rank`<=11 AND `aggr__0__1__order_1_rank`<=4) AND\n" + + " `aggr__0__1__2__order_1_rank`<=4)\n" + + "ORDER BY `aggr__0__order_1_rank` ASC, `aggr__0__1__order_1_rank` ASC,\n" + + " `aggr__0__1__2__order_1_rank` ASC", }, { // [35] TestName: "0 result rows in terms+histogram", @@ -6490,27 +6311,26 @@ var AggregationTests = []AggregationTestCase{ }`, ExpectedResponse: `{"response": {"aggregations":{"0": {"buckets": []}}}}`, ExpectedPancakeResults: make([]model.QueryResultRow, 0), - 
ExpectedPancakeSQL: ` - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__count", - "aggr__0__1__key_0", "aggr__0__1__count" - FROM ( - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__count", - "aggr__0__1__key_0", "aggr__0__1__count", - dense_rank() OVER (ORDER BY "aggr__0__count" DESC, "aggr__0__key_0" ASC) - AS "aggr__0__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__0__key_0" ORDER BY - "aggr__0__1__key_0" ASC) AS "aggr__0__1__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "host.name" AS "aggr__0__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS "aggr__0__count", - "FlightDelayMin" AS "aggr__0__1__key_0", count(*) AS "aggr__0__1__count" - FROM ` + TableName + ` - WHERE ("message" IS NOT NULL AND NOT ("message" __quesma_match '%US%')) - GROUP BY "host.name" AS "aggr__0__key_0", - "FlightDelayMin" AS "aggr__0__1__key_0")) - WHERE "aggr__0__order_1_rank"<=9 - ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__1__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__count`,\n" + + " `aggr__0__1__key_0`, `aggr__0__1__count`\n" + + "FROM (\n" + + " SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__count`,\n" + + " `aggr__0__1__key_0`, `aggr__0__1__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__0__count` DESC, `aggr__0__key_0` ASC)\n" + + " AS `aggr__0__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__0__key_0` ORDER BY\n" + + " `aggr__0__1__key_0` ASC) AS `aggr__0__1__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n" + + " `host.name` AS `aggr__0__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS `aggr__0__count`,\n" + + " `FlightDelayMin` AS `aggr__0__1__key_0`, count(*) AS `aggr__0__1__count`\n" + + " FROM `" + TableName + "`\n" + + " WHERE (`message` IS NOT NULL AND NOT (`message` __quesma_match '%US%'))\n" + + " GROUP BY `host.name` AS 
`aggr__0__key_0`,\n" + + " `FlightDelayMin` AS `aggr__0__1__key_0`))\n" + + "WHERE `aggr__0__order_1_rank`<=9\n" + + "ORDER BY `aggr__0__order_1_rank` ASC, `aggr__0__1__order_1_rank` ASC", }, { // [36] TestName: "0 result rows in terms+histogram + meta field", @@ -6601,27 +6421,26 @@ var AggregationTests = []AggregationTestCase{ } }`, ExpectedPancakeResults: make([]model.QueryResultRow, 0), - ExpectedPancakeSQL: ` - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__count", - "aggr__0__1__key_0", "aggr__0__1__count" - FROM ( - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__count", - "aggr__0__1__key_0", "aggr__0__1__count", - dense_rank() OVER (ORDER BY "aggr__0__count" DESC, "aggr__0__key_0" ASC) - AS "aggr__0__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__0__key_0" ORDER BY - "aggr__0__1__key_0" ASC) AS "aggr__0__1__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "host.name" AS "aggr__0__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS "aggr__0__count", - "FlightDelayMin" AS "aggr__0__1__key_0", count(*) AS "aggr__0__1__count" - FROM ` + TableName + ` - WHERE ("message" IS NOT NULL AND NOT ("message" __quesma_match '%US%')) - GROUP BY "host.name" AS "aggr__0__key_0", - "FlightDelayMin" AS "aggr__0__1__key_0")) - WHERE "aggr__0__order_1_rank"<=11 - ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__1__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__count`,\n" + + " `aggr__0__1__key_0`, `aggr__0__1__count`\n" + + "FROM (\n" + + " SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__count`,\n" + + " `aggr__0__1__key_0`, `aggr__0__1__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__0__count` DESC, `aggr__0__key_0` ASC)\n" + + " AS `aggr__0__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__0__key_0` ORDER BY\n" + + " `aggr__0__1__key_0` ASC) AS `aggr__0__1__order_1_rank`\n" + + " FROM (\n" + + " SELECT 
sum(count(*)) OVER () AS `aggr__0__parent_count`,\n" + + " `host.name` AS `aggr__0__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS `aggr__0__count`,\n" + + " `FlightDelayMin` AS `aggr__0__1__key_0`, count(*) AS `aggr__0__1__count`\n" + + " FROM `" + TableName + "`\n" + + " WHERE (`message` IS NOT NULL AND NOT (`message` __quesma_match '%US%'))\n" + + " GROUP BY `host.name` AS `aggr__0__key_0`,\n" + + " `FlightDelayMin` AS `aggr__0__1__key_0`))\n" + + "WHERE `aggr__0__order_1_rank`<=11\n" + + "ORDER BY `aggr__0__order_1_rank` ASC, `aggr__0__1__order_1_rank` ASC", }, { // [37] // Now we don't copy, as it's nested. Tested with Elasticsearch. @@ -6698,27 +6517,26 @@ var AggregationTests = []AggregationTestCase{ }`, ExpectedResponse: `{"response": {"aggregations":{"0": {"buckets": []}}}}`, ExpectedPancakeResults: make([]model.QueryResultRow, 0), - ExpectedPancakeSQL: ` - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__count", - "aggr__0__1__key_0", "aggr__0__1__count" - FROM ( - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__count", - "aggr__0__1__key_0", "aggr__0__1__count", - dense_rank() OVER (ORDER BY "aggr__0__count" DESC, "aggr__0__key_0" ASC) AS - "aggr__0__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__0__key_0" ORDER BY - "aggr__0__1__key_0" ASC) AS "aggr__0__1__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "host.name" AS "aggr__0__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS "aggr__0__count", - "FlightDelayMin" AS "aggr__0__1__key_0", count(*) AS "aggr__0__1__count" - FROM __quesma_table_name - WHERE ("message" IS NOT NULL AND NOT ("message" __quesma_match '%US%')) - GROUP BY "host.name" AS "aggr__0__key_0", - "FlightDelayMin" AS "aggr__0__1__key_0")) - WHERE "aggr__0__order_1_rank"<=11 - ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__1__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__0__parent_count`, `aggr__0__key_0`, 
`aggr__0__count`,\n" + + " `aggr__0__1__key_0`, `aggr__0__1__count`\n" + + "FROM (\n" + + " SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__count`,\n" + + " `aggr__0__1__key_0`, `aggr__0__1__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__0__count` DESC, `aggr__0__key_0` ASC) AS\n" + + " `aggr__0__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__0__key_0` ORDER BY\n" + + " `aggr__0__1__key_0` ASC) AS `aggr__0__1__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n" + + " `host.name` AS `aggr__0__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS `aggr__0__count`,\n" + + " `FlightDelayMin` AS `aggr__0__1__key_0`, count(*) AS `aggr__0__1__count`\n" + + " FROM `" + TableName + "`\n" + + " WHERE (`message` IS NOT NULL AND NOT (`message` __quesma_match '%US%'))\n" + + " GROUP BY `host.name` AS `aggr__0__key_0`,\n" + + " `FlightDelayMin` AS `aggr__0__1__key_0`))\n" + + "WHERE `aggr__0__order_1_rank`<=11\n" + + "ORDER BY `aggr__0__order_1_rank` ASC, `aggr__0__1__order_1_rank` ASC", }, { // [38] TestName: "simplest top_metrics, no sort", @@ -6778,14 +6596,14 @@ var AggregationTests = []AggregationTestCase{ {}, }, AdditionalAcceptableDifference: []string{"tm_empty_result"}, // TODO: check, but we should return empty result - ExpectedPancakeSQL: ` - SELECT "message" AS "top_metrics__tm_with_result_col_0" - FROM __quesma_table_name - LIMIT 2`, - ExpectedAdditionalPancakeSQLs: []string{` - SELECT "message" AS "top_metrics__tm_empty_result_col_0" - FROM __quesma_table_name - LIMIT 1`}, + ExpectedPancakeSQL: "SELECT `message` AS `top_metrics__tm_with_result_col_0`\n" + + "FROM `" + TableName + "`\n" + + "LIMIT 2", + ExpectedAdditionalPancakeSQLs: []string{ + "SELECT `message` AS `top_metrics__tm_empty_result_col_0`\n" + + "FROM `" + TableName + "`\n" + + "LIMIT 1", + }, }, { // [39] TestName: "simplest top_metrics, with sort", @@ -6851,18 +6669,18 @@ var AggregationTests = 
[]AggregationTestCase{ {}, }, AdditionalAcceptableDifference: []string{"tm_empty_result"}, // TODO: check, but we should return empty result - ExpectedPancakeSQL: ` - SELECT "message" AS "top_metrics__tm_with_result_col_0", - "timestamp" AS "top_metrics__tm_with_result_col_1" - FROM __quesma_table_name - ORDER BY "timestamp" DESC - LIMIT 1`, - ExpectedAdditionalPancakeSQLs: []string{` - SELECT "message" AS "top_metrics__tm_empty_result_col_0", - "timestamp" AS "top_metrics__tm_empty_result_col_1" - FROM __quesma_table_name - ORDER BY "timestamp" DESC - LIMIT 1`}, + ExpectedPancakeSQL: "SELECT `message` AS `top_metrics__tm_with_result_col_0`,\n" + + " `timestamp` AS `top_metrics__tm_with_result_col_1`\n" + + "FROM `" + TableName + "`\n" + + "ORDER BY `timestamp` DESC\n" + + "LIMIT 1", + ExpectedAdditionalPancakeSQLs: []string{ + "SELECT `message` AS `top_metrics__tm_empty_result_col_0`,\n" + + " `timestamp` AS `top_metrics__tm_empty_result_col_1`\n" + + "FROM `" + TableName + "`\n" + + "ORDER BY `timestamp` DESC\n" + + "LIMIT 1", + }, }, { // [40] TestName: "terms ordered by subaggregation", @@ -6924,16 +6742,15 @@ var AggregationTests = []AggregationTestCase{ }`, ExpectedResponse: `{"aggregations": {"2": {"buckets": []}}}`, ExpectedPancakeResults: make([]model.QueryResultRow, 0), - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__2__parent_count", - "name" AS "aggr__2__key_0", - count(*) AS "aggr__2__count", - sumOrNull("total") AS "metric__2__1_col_0" - FROM ` + TableName + ` - WHERE NOT ((("abc">=0 AND "abc"<600) OR "type" __quesma_match '%def%')) - GROUP BY "name" AS "aggr__2__key_0" - ORDER BY "metric__2__1_col_0" DESC, "aggr__2__key_0" ASC - LIMIT 11`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__2__parent_count`,\n" + + " `name` AS `aggr__2__key_0`, \n" + + " count(*) AS `aggr__2__count`,\n" + + " sumOrNull(`total`) AS `metric__2__1_col_0`\n" + + "FROM `" + TableName + "`\n" + + "WHERE NOT (((`abc`>=0 AND `abc`<600) OR `type` 
__quesma_match '%def%'))\n" + + "GROUP BY `name` AS `aggr__2__key_0`\n" + + "ORDER BY `metric__2__1_col_0` DESC, `aggr__2__key_0` ASC\n" + + "LIMIT 11", }, { // [41] TestName: "0 result rows in 2x terms", @@ -6986,28 +6803,27 @@ var AggregationTests = []AggregationTestCase{ }`, ExpectedResponse: `{"response": {"aggregations":{"0": {"buckets": []}}}}`, ExpectedPancakeResults: make([]model.QueryResultRow, 0), - ExpectedPancakeSQL: ` - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__count", - "aggr__0__1__parent_count", "aggr__0__1__key_0", "aggr__0__1__count" - FROM ( - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__count", - "aggr__0__1__parent_count", "aggr__0__1__key_0", "aggr__0__1__count", - dense_rank() OVER (ORDER BY "aggr__0__count" DESC, "aggr__0__key_0" ASC) AS - "aggr__0__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__0__key_0" ORDER BY - "aggr__0__1__count" DESC, "aggr__0__1__key_0" ASC) AS - "aggr__0__1__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "OriginAirportID" AS "aggr__0__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS "aggr__0__count", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS - "aggr__0__1__parent_count", "DestAirportID" AS "aggr__0__1__key_0", - count(*) AS "aggr__0__1__count" - FROM __quesma_table_name - GROUP BY "OriginAirportID" AS "aggr__0__key_0", - "DestAirportID" AS "aggr__0__1__key_0")) - WHERE ("aggr__0__order_1_rank"<=11 AND "aggr__0__1__order_1_rank"<=4) - ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__1__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__count`,\n" + + " `aggr__0__1__parent_count`, `aggr__0__1__key_0`, `aggr__0__1__count`\n" + + "FROM (\n" + + " SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__count`,\n" + + " `aggr__0__1__parent_count`, `aggr__0__1__key_0`, `aggr__0__1__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__0__count` DESC, 
`aggr__0__key_0` ASC) AS\n" + + " `aggr__0__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__0__key_0` ORDER BY\n" + + " `aggr__0__1__count` DESC, `aggr__0__1__key_0` ASC) AS\n" + + " `aggr__0__1__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n" + + " `OriginAirportID` AS `aggr__0__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS `aggr__0__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS\n" + + " `aggr__0__1__parent_count`, `DestAirportID` AS `aggr__0__1__key_0`,\n" + + " count(*) AS `aggr__0__1__count`\n" + + " FROM `" + TableName + "`\n" + + " GROUP BY `OriginAirportID` AS `aggr__0__key_0`,\n" + + " `DestAirportID` AS `aggr__0__1__key_0`))\n" + + "WHERE (`aggr__0__order_1_rank`<=11 AND `aggr__0__1__order_1_rank`<=4)\n" + + "ORDER BY `aggr__0__order_1_rank` ASC, `aggr__0__1__order_1_rank` ASC", }, } diff --git a/platform/testdata/aggregation_requests_2.go b/platform/testdata/aggregation_requests_2.go index 2dd2e106f..11db0a0fe 100644 --- a/platform/testdata/aggregation_requests_2.go +++ b/platform/testdata/aggregation_requests_2.go @@ -370,95 +370,21 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__year2__count", uint64(33)), }}}, }, - ExpectedPancakeSQL: ` - SELECT toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( - "@timestamp",'Europe/Warsaw'))*1000) / 86400000) AS "aggr__day1__key_0", - count(*) AS "aggr__day1__count" - FROM ` + TableName + ` - GROUP BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( - "@timestamp",'Europe/Warsaw'))*1000) / 86400000) AS "aggr__day1__key_0" - ORDER BY "aggr__day1__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(toTimezone(`@timestamp`,'Europe/Warsaw'))*1000) / 86400000) AS `aggr__day1__key_0`, count(*) AS `aggr__day1__count` FROM `__quesma_table_name` GROUP BY 
toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(toTimezone(`@timestamp`,'Europe/Warsaw'))*1000) / 86400000) AS `aggr__day1__key_0` ORDER BY `aggr__day1__key_0` ASC", ExpectedAdditionalPancakeSQLs: []string{ - `SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 86400000) AS - "aggr__day2__key_0", count(*) AS "aggr__day2__count" - FROM ` + TableName + ` - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 86400000) AS - "aggr__day2__key_0" - ORDER BY "aggr__day2__key_0" ASC`, - `SELECT toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( - "@timestamp",'Europe/Warsaw'))*1000) / 3600000) AS "aggr__hour1__key_0", - count(*) AS "aggr__hour1__count" - FROM ` + TableName + ` - GROUP BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( - "@timestamp",'Europe/Warsaw'))*1000) / 3600000) AS "aggr__hour1__key_0" - ORDER BY "aggr__hour1__key_0" ASC`, - `SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 3600000) AS - "aggr__hour2__key_0", count(*) AS "aggr__hour2__count" - FROM ` + TableName + ` - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 3600000) AS - "aggr__hour2__key_0" - ORDER BY "aggr__hour2__key_0" ASC`, - `SELECT toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( - "@timestamp",'Europe/Warsaw'))*1000) / 60000) AS "aggr__minute1__key_0", - count(*) AS "aggr__minute1__count" - FROM ` + TableName + ` - GROUP BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( - "@timestamp",'Europe/Warsaw'))*1000) / 60000) AS "aggr__minute1__key_0" - ORDER BY "aggr__minute1__key_0" ASC`, - `SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 60000) AS - "aggr__minute2__key_0", count(*) AS "aggr__minute2__count" - FROM ` + TableName + ` - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 60000) AS - "aggr__minute2__key_0" - ORDER BY "aggr__minute2__key_0" ASC`, - `SELECT toInt64(toUnixTimestamp(toStartOfMonth(toTimezone("@timestamp",'Europe/Warsaw'))))*1000 - 
AS "aggr__month1__key_0", count(*) AS "aggr__month1__count" - FROM ` + TableName + ` - GROUP BY toInt64(toUnixTimestamp(toStartOfMonth(toTimezone("@timestamp",'Europe/Warsaw'))))*1000 - AS "aggr__month1__key_0" - ORDER BY "aggr__month1__key_0" ASC`, - `SELECT toInt64(toUnixTimestamp(toStartOfMonth(toTimezone("@timestamp",'UTC'))))*1000 AS - "aggr__month2__key_0", count(*) AS "aggr__month2__count" - FROM ` + TableName + ` - GROUP BY toInt64(toUnixTimestamp(toStartOfMonth(toTimezone("@timestamp",'UTC'))))*1000 AS - "aggr__month2__key_0" - ORDER BY "aggr__month2__key_0" ASC`, - `SELECT toInt64(toUnixTimestamp(toStartOfQuarter(toTimezone("@timestamp",'Europe/Warsaw'))))*1000 - AS "aggr__quarter1__key_0", count(*) AS "aggr__quarter1__count" - FROM ` + TableName + ` - GROUP BY toInt64(toUnixTimestamp(toStartOfQuarter(toTimezone("@timestamp",'Europe/Warsaw'))))*1000 AS - "aggr__quarter1__key_0" - ORDER BY "aggr__quarter1__key_0" ASC`, - `SELECT toInt64(toUnixTimestamp(toStartOfQuarter(toTimezone("@timestamp",'UTC'))))*1000 AS - "aggr__quarter2__key_0", count(*) AS "aggr__quarter2__count" - FROM ` + TableName + ` - GROUP BY toInt64(toUnixTimestamp(toStartOfQuarter(toTimezone("@timestamp",'UTC'))))*1000 AS - "aggr__quarter2__key_0" - ORDER BY "aggr__quarter2__key_0" ASC`, - `SELECT toInt64(toUnixTimestamp(toStartOfWeek(toTimezone("@timestamp",'Europe/Warsaw'))))*1000 AS - "aggr__week1__key_0", count(*) AS "aggr__week1__count" - FROM ` + TableName + ` - GROUP BY toInt64(toUnixTimestamp(toStartOfWeek(toTimezone("@timestamp",'Europe/Warsaw'))))*1000 - AS "aggr__week1__key_0" - ORDER BY "aggr__week1__key_0" ASC`, - `SELECT toInt64(toUnixTimestamp(toStartOfWeek(toTimezone("@timestamp",'UTC'))))*1000 AS - "aggr__week2__key_0", count(*) AS "aggr__week2__count" - FROM ` + TableName + ` - GROUP BY toInt64(toUnixTimestamp(toStartOfWeek(toTimezone("@timestamp",'UTC'))))*1000 AS - "aggr__week2__key_0" - ORDER BY "aggr__week2__key_0" ASC`, - `SELECT 
toInt64(toUnixTimestamp(toStartOfYear(toTimezone("@timestamp",'Europe/Warsaw'))))*1000 - AS "aggr__year1__key_0", count(*) AS "aggr__year1__count" - FROM ` + TableName + ` - GROUP BY toInt64(toUnixTimestamp(toStartOfYear(toTimezone("@timestamp",'Europe/Warsaw'))))*1000 - AS "aggr__year1__key_0" - ORDER BY "aggr__year1__key_0" ASC`, - `SELECT toInt64(toUnixTimestamp(toStartOfYear(toTimezone("@timestamp",'UTC'))))*1000 AS - "aggr__year2__key_0", count(*) AS "aggr__year2__count" - FROM ` + TableName + ` - GROUP BY toInt64(toUnixTimestamp(toStartOfYear(toTimezone("@timestamp",'UTC'))))*1000 AS - "aggr__year2__key_0" - ORDER BY "aggr__year2__key_0" ASC`, + "SELECT toInt64(toUnixTimestamp64Milli(`@timestamp`) / 86400000) AS `aggr__day2__key_0`, count(*) AS `aggr__day2__count` FROM `__quesma_table_name` GROUP BY toInt64(toUnixTimestamp64Milli(`@timestamp`) / 86400000) AS `aggr__day2__key_0` ORDER BY `aggr__day2__key_0` ASC", + "SELECT toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(toTimezone(`@timestamp`,'Europe/Warsaw'))*1000) / 3600000) AS `aggr__hour1__key_0`, count(*) AS `aggr__hour1__count` FROM `__quesma_table_name` GROUP BY toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(toTimezone(`@timestamp`,'Europe/Warsaw'))*1000) / 3600000) AS `aggr__hour1__key_0` ORDER BY `aggr__hour1__key_0` ASC", + "SELECT toInt64(toUnixTimestamp64Milli(`@timestamp`) / 3600000) AS `aggr__hour2__key_0`, count(*) AS `aggr__hour2__count` FROM `__quesma_table_name` GROUP BY toInt64(toUnixTimestamp64Milli(`@timestamp`) / 3600000) AS `aggr__hour2__key_0` ORDER BY `aggr__hour2__key_0` ASC", + "SELECT toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(toTimezone(`@timestamp`,'Europe/Warsaw'))*1000) / 60000) AS `aggr__minute1__key_0`, count(*) AS `aggr__minute1__count` FROM `__quesma_table_name` GROUP BY toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(toTimezone(`@timestamp`,'Europe/Warsaw'))*1000) / 60000) AS `aggr__minute1__key_0` ORDER BY 
`aggr__minute1__key_0` ASC", + "SELECT toInt64(toUnixTimestamp64Milli(`@timestamp`) / 60000) AS `aggr__minute2__key_0`, count(*) AS `aggr__minute2__count` FROM `__quesma_table_name` GROUP BY toInt64(toUnixTimestamp64Milli(`@timestamp`) / 60000) AS `aggr__minute2__key_0` ORDER BY `aggr__minute2__key_0` ASC", + "SELECT toInt64(toUnixTimestamp(toStartOfMonth(toTimezone(`@timestamp`,'Europe/Warsaw'))))*1000 AS `aggr__month1__key_0`, count(*) AS `aggr__month1__count` FROM `__quesma_table_name` GROUP BY toInt64(toUnixTimestamp(toStartOfMonth(toTimezone(`@timestamp`,'Europe/Warsaw'))))*1000 AS `aggr__month1__key_0` ORDER BY `aggr__month1__key_0` ASC", + "SELECT toInt64(toUnixTimestamp(toStartOfMonth(toTimezone(`@timestamp`,'UTC'))))*1000 AS `aggr__month2__key_0`, count(*) AS `aggr__month2__count` FROM `__quesma_table_name` GROUP BY toInt64(toUnixTimestamp(toStartOfMonth(toTimezone(`@timestamp`,'UTC'))))*1000 AS `aggr__month2__key_0` ORDER BY `aggr__month2__key_0` ASC", + "SELECT toInt64(toUnixTimestamp(toStartOfQuarter(toTimezone(`@timestamp`,'Europe/Warsaw'))))*1000 AS `aggr__quarter1__key_0`, count(*) AS `aggr__quarter1__count` FROM `__quesma_table_name` GROUP BY toInt64(toUnixTimestamp(toStartOfQuarter(toTimezone(`@timestamp`,'Europe/Warsaw'))))*1000 AS `aggr__quarter1__key_0` ORDER BY `aggr__quarter1__key_0` ASC", + "SELECT toInt64(toUnixTimestamp(toStartOfQuarter(toTimezone(`@timestamp`,'UTC'))))*1000 AS `aggr__quarter2__key_0`, count(*) AS `aggr__quarter2__count` FROM `__quesma_table_name` GROUP BY toInt64(toUnixTimestamp(toStartOfQuarter(toTimezone(`@timestamp`,'UTC'))))*1000 AS `aggr__quarter2__key_0` ORDER BY `aggr__quarter2__key_0` ASC", + "SELECT toInt64(toUnixTimestamp(toStartOfWeek(toTimezone(`@timestamp`,'Europe/Warsaw'))))*1000 AS `aggr__week1__key_0`, count(*) AS `aggr__week1__count` FROM `__quesma_table_name` GROUP BY toInt64(toUnixTimestamp(toStartOfWeek(toTimezone(`@timestamp`,'Europe/Warsaw'))))*1000 AS `aggr__week1__key_0` ORDER BY 
`aggr__week1__key_0` ASC", + "SELECT toInt64(toUnixTimestamp(toStartOfWeek(toTimezone(`@timestamp`,'UTC'))))*1000 AS `aggr__week2__key_0`, count(*) AS `aggr__week2__count` FROM `__quesma_table_name` GROUP BY toInt64(toUnixTimestamp(toStartOfWeek(toTimezone(`@timestamp`,'UTC'))))*1000 AS `aggr__week2__key_0` ORDER BY `aggr__week2__key_0` ASC", + "SELECT toInt64(toUnixTimestamp(toStartOfYear(toTimezone(`@timestamp`,'Europe/Warsaw'))))*1000 AS `aggr__year1__key_0`, count(*) AS `aggr__year1__count` FROM `__quesma_table_name` GROUP BY toInt64(toUnixTimestamp(toStartOfYear(toTimezone(`@timestamp`,'Europe/Warsaw'))))*1000 AS `aggr__year1__key_0` ORDER BY `aggr__year1__key_0` ASC", + "SELECT toInt64(toUnixTimestamp(toStartOfYear(toTimezone(`@timestamp`,'UTC'))))*1000 AS `aggr__year2__key_0`, count(*) AS `aggr__year2__count` FROM `__quesma_table_name` GROUP BY toInt64(toUnixTimestamp(toStartOfYear(toTimezone(`@timestamp`,'UTC'))))*1000 AS `aggr__year2__key_0` ORDER BY `aggr__year2__key_0` ASC", }, }, { // [43] @@ -600,17 +526,7 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("metric__2__2_col_0", 10), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__2__parent_count", - "response" AS "aggr__2__key_0", count(*) AS "aggr__2__count", - quantiles(0.010000)("timestamp") AS "metric__2__1_col_0", - quantiles(0.020000)("timestamp") AS "metric__2__1_col_1", - sumOrNull("count") AS "metric__2__2_col_0" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1713401475845) AND "timestamp"<=fromUnixTimestamp64Milli(1714697475845)) - GROUP BY "response" AS "aggr__2__key_0" - ORDER BY "aggr__2__count" DESC, "aggr__2__key_0" ASC - LIMIT 4`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__2__parent_count`, `response` AS `aggr__2__key_0`, count(*) AS `aggr__2__count`, quantiles(0.010000)(`timestamp`) AS `metric__2__1_col_0`, quantiles(0.020000)(`timestamp`) AS `metric__2__1_col_1`, sumOrNull(`count`) AS 
`metric__2__2_col_0` FROM `__quesma_table_name` WHERE (`timestamp`>=fromUnixTimestamp64Milli(1713401475845) AND `timestamp`<=fromUnixTimestamp64Milli(1714697475845)) GROUP BY `response` AS `aggr__2__key_0` ORDER BY `aggr__2__count` DESC, `aggr__2__key_0` ASC LIMIT 4", }, { // [44] TestName: "2x terms with nulls 1/4, nulls in second aggregation, with missing parameter", @@ -751,30 +667,29 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__2__8__count", int64(17)), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "aggr__2__8__parent_count", "aggr__2__8__key_0", "aggr__2__8__count" - FROM ( - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "aggr__2__8__parent_count", "aggr__2__8__key_0", "aggr__2__8__count", - dense_rank() OVER (ORDER BY "aggr__2__count" DESC, "aggr__2__key_0" ASC) AS - "aggr__2__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY - "aggr__2__8__count" DESC, "aggr__2__8__key_0" ASC) AS - "aggr__2__8__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__2__parent_count", - "surname" AS "aggr__2__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS - "aggr__2__8__parent_count", - COALESCE("limbName", '__missing__') AS "aggr__2__8__key_0", - count(*) AS "aggr__2__8__count" - FROM __quesma_table_name - GROUP BY "surname" AS "aggr__2__key_0", - COALESCE("limbName", '__missing__') AS "aggr__2__8__key_0")) - WHERE ("aggr__2__order_1_rank"<=201 AND "aggr__2__8__order_1_rank"<=20) - ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__8__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `aggr__2__8__parent_count`, `aggr__2__8__key_0`, `aggr__2__8__count`\n" + + "FROM (\n" + + " SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " 
`aggr__2__8__parent_count`, `aggr__2__8__key_0`, `aggr__2__8__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__2__count` DESC, `aggr__2__key_0` ASC) AS\n" + + " `aggr__2__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0` ORDER BY\n" + + " `aggr__2__8__count` DESC, `aggr__2__8__key_0` ASC) AS\n" + + " `aggr__2__8__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__2__parent_count`,\n" + + " `surname` AS `aggr__2__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS\n" + + " `aggr__2__8__parent_count`,\n" + + " COALESCE(`limbName`, '__missing__') AS `aggr__2__8__key_0`,\n" + + " count(*) AS `aggr__2__8__count`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY `surname` AS `aggr__2__key_0`,\n" + + " COALESCE(`limbName`, '__missing__') AS `aggr__2__8__key_0`))\n" + + "WHERE (`aggr__2__order_1_rank`<=201 AND `aggr__2__8__order_1_rank`<=20)\n" + + "ORDER BY `aggr__2__order_1_rank` ASC, `aggr__2__8__order_1_rank` ASC", }, { // [45] TestName: "2x terms with nulls 2/4, nulls in the second aggregation, but no missing parameter", @@ -975,28 +890,27 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__2__8__count_1", int64(17)), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "aggr__2__8__parent_count", "aggr__2__8__key_0", "aggr__2__8__count" - FROM ( - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "aggr__2__8__parent_count", "aggr__2__8__key_0", "aggr__2__8__count", - dense_rank() OVER (ORDER BY "aggr__2__count" DESC, "aggr__2__key_0" ASC) AS - "aggr__2__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY - "aggr__2__8__count" DESC, "aggr__2__8__key_0" ASC) AS - "aggr__2__8__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__2__parent_count", - "surname" AS "aggr__2__key_0", - 
sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS - "aggr__2__8__parent_count", "limbName" AS "aggr__2__8__key_0", - count(*) AS "aggr__2__8__count" - FROM __quesma_table_name - GROUP BY "surname" AS "aggr__2__key_0", "limbName" AS "aggr__2__8__key_0")) - WHERE ("aggr__2__order_1_rank"<=201 AND "aggr__2__8__order_1_rank"<=21) - ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__8__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `aggr__2__8__parent_count`, `aggr__2__8__key_0`, `aggr__2__8__count`\n" + + "FROM (\n" + + " SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `aggr__2__8__parent_count`, `aggr__2__8__key_0`, `aggr__2__8__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__2__count` DESC, `aggr__2__key_0` ASC) AS\n" + + " `aggr__2__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0` ORDER BY\n" + + " `aggr__2__8__count` DESC, `aggr__2__8__key_0` ASC) AS\n" + + " `aggr__2__8__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__2__parent_count`,\n" + + " `surname` AS `aggr__2__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS\n" + + " `aggr__2__8__parent_count`, `limbName` AS `aggr__2__8__key_0`,\n" + + " count(*) AS `aggr__2__8__count`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY `surname` AS `aggr__2__key_0`, `limbName` AS `aggr__2__8__key_0`))\n" + + "WHERE (`aggr__2__order_1_rank`<=201 AND `aggr__2__8__order_1_rank`<=21)\n" + + "ORDER BY `aggr__2__order_1_rank` ASC, `aggr__2__8__order_1_rank` ASC", }, { // [46] TestName: "2x terms with nulls 3/4, nulls in the first aggregation, with missing parameter", @@ -1138,30 +1052,29 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__2__8__count", int64(17)), 
}}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "aggr__2__8__parent_count", "aggr__2__8__key_0", "aggr__2__8__count" - FROM ( - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "aggr__2__8__parent_count", "aggr__2__8__key_0", "aggr__2__8__count", - dense_rank() OVER (ORDER BY "aggr__2__count" DESC, "aggr__2__key_0" ASC) AS - "aggr__2__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY - "aggr__2__8__count" DESC, "aggr__2__8__key_0" ASC) AS - "aggr__2__8__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__2__parent_count", - COALESCE("surname", 'miss') AS "aggr__2__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS - "aggr__2__8__parent_count", - COALESCE("limbName", '__missing__') AS "aggr__2__8__key_0", - count(*) AS "aggr__2__8__count" - FROM __quesma_table_name - GROUP BY COALESCE("surname", 'miss') AS "aggr__2__key_0", - COALESCE("limbName", '__missing__') AS "aggr__2__8__key_0")) - WHERE ("aggr__2__order_1_rank"<=200 AND "aggr__2__8__order_1_rank"<=20) - ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__8__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `aggr__2__8__parent_count`, `aggr__2__8__key_0`, `aggr__2__8__count`\n" + + "FROM (\n" + + " SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `aggr__2__8__parent_count`, `aggr__2__8__key_0`, `aggr__2__8__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__2__count` DESC, `aggr__2__key_0` ASC) AS\n" + + " `aggr__2__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0` ORDER BY\n" + + " `aggr__2__8__count` DESC, `aggr__2__8__key_0` ASC) AS\n" + + " `aggr__2__8__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__2__parent_count`,\n" + + " COALESCE(`surname`, 'miss') AS 
`aggr__2__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS\n" + + " `aggr__2__8__parent_count`,\n" + + " COALESCE(`limbName`, '__missing__') AS `aggr__2__8__key_0`,\n" + + " count(*) AS `aggr__2__8__count`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY COALESCE(`surname`, 'miss') AS `aggr__2__key_0`,\n" + + " COALESCE(`limbName`, '__missing__') AS `aggr__2__8__key_0`))\n" + + "WHERE (`aggr__2__order_1_rank`<=200 AND `aggr__2__8__order_1_rank`<=20)\n" + + "ORDER BY `aggr__2__order_1_rank` ASC, `aggr__2__8__order_1_rank` ASC", }, { // [47] TestName: "2x terms with nulls 4/4, nulls in the first aggregation, without missing parameter", @@ -1325,28 +1238,27 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__2__8__count", int64(17)), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "aggr__2__8__parent_count", "aggr__2__8__key_0", "aggr__2__8__count" - FROM ( - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "aggr__2__8__parent_count", "aggr__2__8__key_0", "aggr__2__8__count", - dense_rank() OVER (ORDER BY "aggr__2__count" DESC, "aggr__2__key_0" ASC) AS - "aggr__2__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY - "aggr__2__8__count" DESC, "aggr__2__8__key_0" ASC) AS - "aggr__2__8__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__2__parent_count", - "surname" AS "aggr__2__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS - "aggr__2__8__parent_count", "limbName" AS "aggr__2__8__key_0", - count(*) AS "aggr__2__8__count" - FROM __quesma_table_name - GROUP BY "surname" AS "aggr__2__key_0", "limbName" AS "aggr__2__8__key_0")) - WHERE ("aggr__2__order_1_rank"<=201 AND "aggr__2__8__order_1_rank"<=21) - ORDER BY "aggr__2__order_1_rank" ASC, 
"aggr__2__8__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `aggr__2__8__parent_count`, `aggr__2__8__key_0`, `aggr__2__8__count`\n" + + "FROM (\n" + + " SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `aggr__2__8__parent_count`, `aggr__2__8__key_0`, `aggr__2__8__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__2__count` DESC, `aggr__2__key_0` ASC) AS\n" + + " `aggr__2__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0` ORDER BY\n" + + " `aggr__2__8__count` DESC, `aggr__2__8__key_0` ASC) AS\n" + + " `aggr__2__8__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__2__parent_count`,\n" + + " `surname` AS `aggr__2__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS\n" + + " `aggr__2__8__parent_count`, `limbName` AS `aggr__2__8__key_0`,\n" + + " count(*) AS `aggr__2__8__count`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY `surname` AS `aggr__2__key_0`, `limbName` AS `aggr__2__8__key_0`))\n" + + "WHERE (`aggr__2__order_1_rank`<=201 AND `aggr__2__8__order_1_rank`<=21)\n" + + "ORDER BY `aggr__2__order_1_rank` ASC, `aggr__2__8__order_1_rank` ASC", }, { // [48], "old" test, also can be found in testdata/requests.go TestAsyncSearch[3] // Copied it also here to be more sure we do not create some regression @@ -1500,28 +1412,26 @@ var AggregationTests2 = []AggregationTestCase{ `ORDER BY ` + timestampGroupByClause, }, */ - ExpectedPancakeSQL: ` - SELECT "aggr__2__key_0", "aggr__2__count", "aggr__2__3__key_0", - "aggr__2__3__count" - FROM ( - SELECT "aggr__2__key_0", "aggr__2__count", "aggr__2__3__key_0", - "aggr__2__3__count", - dense_rank() OVER (ORDER BY "aggr__2__key_0" ASC) AS "aggr__2__order_1_rank" - , - dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY - "aggr__2__3__key_0" ASC) AS 
"aggr__2__3__order_1_rank" - FROM ( - SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS - "aggr__2__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - toInt64(toUnixTimestamp64Milli("@timestamp") / 40000) AS - "aggr__2__3__key_0", count(*) AS "aggr__2__3__count" - FROM ` + TableName + ` - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS - "aggr__2__key_0", - toInt64(toUnixTimestamp64Milli("@timestamp") / 40000) AS - "aggr__2__3__key_0")) - ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__3__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__2__key_0`, `aggr__2__count`, `aggr__2__3__key_0`,\n" + + " `aggr__2__3__count`\n" + + "FROM (\n" + + " SELECT `aggr__2__key_0`, `aggr__2__count`, `aggr__2__3__key_0`,\n" + + " `aggr__2__3__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__2__key_0` ASC) AS `aggr__2__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0` ORDER BY\n" + + " `aggr__2__3__key_0` ASC) AS `aggr__2__3__order_1_rank`\n" + + " FROM (\n" + + " SELECT toInt64(toUnixTimestamp64Milli(`@timestamp`) / 30000) AS\n" + + " `aggr__2__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__count`,\n" + + " toInt64(toUnixTimestamp64Milli(`@timestamp`) / 40000) AS\n" + + " `aggr__2__3__key_0`, count(*) AS `aggr__2__3__count`\n" + + " FROM `" + TableName + "`\n" + + " GROUP BY toInt64(toUnixTimestamp64Milli(`@timestamp`) / 30000) AS\n" + + " `aggr__2__key_0`,\n" + + " toInt64(toUnixTimestamp64Milli(`@timestamp`) / 40000) AS\n" + + " `aggr__2__3__key_0`))\n" + + "ORDER BY `aggr__2__order_1_rank` ASC, `aggr__2__3__order_1_rank` ASC", }, { // [49] TODO should null be in the response? Maybe try to replicate and see if it's fine as is. 
TestName: "2x histogram", @@ -1661,26 +1571,24 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__2__3__count", 1), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__2__key_0", "aggr__2__count", "aggr__2__3__key_0", - "aggr__2__3__count" - FROM ( - SELECT "aggr__2__key_0", "aggr__2__count", "aggr__2__3__key_0", - "aggr__2__3__count", - dense_rank() OVER (ORDER BY "aggr__2__key_0" ASC) AS "aggr__2__order_1_rank" - , - dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY - "aggr__2__3__key_0" ASC) AS "aggr__2__3__order_1_rank" - FROM ( - SELECT floor("bytes"/100)*100 AS "aggr__2__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - floor("bytes2"/5)*5 AS "aggr__2__3__key_0", - count(*) AS "aggr__2__3__count" - FROM ` + TableName + ` - WHERE ("timestamp">=fromUnixTimestamp64Milli(1715348876077) AND "timestamp"<=fromUnixTimestamp64Milli(1715349776077)) - GROUP BY floor("bytes"/100)*100 AS "aggr__2__key_0", - floor("bytes2"/5)*5 AS "aggr__2__3__key_0")) - ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__3__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__2__key_0`, `aggr__2__count`, `aggr__2__3__key_0`,\n" + + " `aggr__2__3__count`\n" + + "FROM (\n" + + " SELECT `aggr__2__key_0`, `aggr__2__count`, `aggr__2__3__key_0`,\n" + + " `aggr__2__3__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__2__key_0` ASC) AS `aggr__2__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0` ORDER BY\n" + + " `aggr__2__3__key_0` ASC) AS `aggr__2__3__order_1_rank`\n" + + " FROM (\n" + + " SELECT floor(`bytes`/100)*100 AS `aggr__2__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__count`,\n" + + " floor(`bytes2`/5)*5 AS `aggr__2__3__key_0`,\n" + + " count(*) AS `aggr__2__3__count`\n" + + " FROM `" + TableName + "`\n" + + " WHERE (`timestamp`>=fromUnixTimestamp64Milli(1715348876077) AND `timestamp`<=fromUnixTimestamp64Milli(1715349776077))\n" + + " GROUP BY 
floor(`bytes`/100)*100 AS `aggr__2__key_0`,\n" + + " floor(`bytes2`/5)*5 AS `aggr__2__3__key_0`))\n" + + "ORDER BY `aggr__2__order_1_rank` ASC, `aggr__2__3__order_1_rank` ASC", }, { // [50] TODO: what about nulls in histogram? Maybe they should be treated like in terms? TestName: "2x histogram with min_doc_count 0", @@ -1855,26 +1763,24 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__2__3__count", 1), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__2__key_0", "aggr__2__count", "aggr__2__3__key_0", - "aggr__2__3__count" - FROM ( - SELECT "aggr__2__key_0", "aggr__2__count", "aggr__2__3__key_0", - "aggr__2__3__count", - dense_rank() OVER (ORDER BY "aggr__2__key_0" ASC) AS "aggr__2__order_1_rank" - , - dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY - "aggr__2__3__key_0" ASC) AS "aggr__2__3__order_1_rank" - FROM ( - SELECT floor("bytes"/100)*100 AS "aggr__2__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - floor("bytes2"/5)*5 AS "aggr__2__3__key_0", - count(*) AS "aggr__2__3__count" - FROM ` + TableName + ` - WHERE ("timestamp">=fromUnixTimestamp64Milli(1715348876077) AND "timestamp"<=fromUnixTimestamp64Milli(1715349776077)) - GROUP BY floor("bytes"/100)*100 AS "aggr__2__key_0", - floor("bytes2"/5)*5 AS "aggr__2__3__key_0")) - ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__3__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__2__key_0`, `aggr__2__count`, `aggr__2__3__key_0`,\n" + + " `aggr__2__3__count`\n" + + "FROM (\n" + + " SELECT `aggr__2__key_0`, `aggr__2__count`, `aggr__2__3__key_0`,\n" + + " `aggr__2__3__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__2__key_0` ASC) AS `aggr__2__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0` ORDER BY\n" + + " `aggr__2__3__key_0` ASC) AS `aggr__2__3__order_1_rank`\n" + + " FROM (\n" + + " SELECT floor(`bytes`/100)*100 AS `aggr__2__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS 
`aggr__2__count`,\n" + + " floor(`bytes2`/5)*5 AS `aggr__2__3__key_0`,\n" + + " count(*) AS `aggr__2__3__count`\n" + + " FROM `" + TableName + "`\n" + + " WHERE (`timestamp`>=fromUnixTimestamp64Milli(1715348876077) AND `timestamp`<=fromUnixTimestamp64Milli(1715349776077))\n" + + " GROUP BY floor(`bytes`/100)*100 AS `aggr__2__key_0`,\n" + + " floor(`bytes2`/5)*5 AS `aggr__2__3__key_0`))\n" + + "ORDER BY `aggr__2__order_1_rank` ASC, `aggr__2__3__order_1_rank` ASC", }, { // [51] TestName: "2x terms with sampler in the middle", @@ -2032,33 +1938,32 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__2__8__5__count", int64(17)), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "aggr__2__8__count", "aggr__2__8__5__parent_count", "aggr__2__8__5__key_0", - "aggr__2__8__5__count" - FROM ( - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "aggr__2__8__count", "aggr__2__8__5__parent_count", "aggr__2__8__5__key_0", - "aggr__2__8__5__count", - dense_rank() OVER (ORDER BY "aggr__2__count" DESC, "aggr__2__key_0" ASC) AS - "aggr__2__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY - "aggr__2__8__5__count" DESC, "aggr__2__8__5__key_0" ASC) AS - "aggr__2__8__5__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__2__parent_count", - "surname" AS "aggr__2__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__8__count", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS - "aggr__2__8__5__parent_count", - COALESCE("limbName", '__missing__') AS "aggr__2__8__5__key_0", - count(*) AS "aggr__2__8__5__count" - FROM __quesma_table_name - GROUP BY "surname" AS "aggr__2__key_0", - COALESCE("limbName", '__missing__') AS "aggr__2__8__5__key_0")) - WHERE ("aggr__2__order_1_rank"<=201 AND "aggr__2__8__5__order_1_rank"<=20) - ORDER BY "aggr__2__order_1_rank" 
ASC, "aggr__2__8__5__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `aggr__2__8__count`, `aggr__2__8__5__parent_count`, `aggr__2__8__5__key_0`,\n" + + " `aggr__2__8__5__count`\n" + + "FROM (\n" + + " SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `aggr__2__8__count`, `aggr__2__8__5__parent_count`, `aggr__2__8__5__key_0`,\n" + + " `aggr__2__8__5__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__2__count` DESC, `aggr__2__key_0` ASC) AS\n" + + " `aggr__2__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0` ORDER BY\n" + + " `aggr__2__8__5__count` DESC, `aggr__2__8__5__key_0` ASC) AS\n" + + " `aggr__2__8__5__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__2__parent_count`,\n" + + " `surname` AS `aggr__2__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__8__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS\n" + + " `aggr__2__8__5__parent_count`,\n" + + " COALESCE(`limbName`, '__missing__') AS `aggr__2__8__5__key_0`,\n" + + " count(*) AS `aggr__2__8__5__count`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY `surname` AS `aggr__2__key_0`,\n" + + " COALESCE(`limbName`, '__missing__') AS `aggr__2__8__5__key_0`))\n" + + "WHERE (`aggr__2__order_1_rank`<=201 AND `aggr__2__8__5__order_1_rank`<=20)\n" + + "ORDER BY `aggr__2__order_1_rank` ASC, `aggr__2__8__5__order_1_rank` ASC", }, { // [52] TestName: "2x terms with random_sampler in the middle", @@ -2221,33 +2126,32 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__2__8__5__count", int64(17)), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "aggr__2__8__count", "aggr__2__8__5__parent_count", "aggr__2__8__5__key_0", - "aggr__2__8__5__count" - FROM ( - 
SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "aggr__2__8__count", "aggr__2__8__5__parent_count", "aggr__2__8__5__key_0", - "aggr__2__8__5__count", - dense_rank() OVER (ORDER BY "aggr__2__count" DESC, "aggr__2__key_0" ASC) AS - "aggr__2__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY - "aggr__2__8__5__count" DESC, "aggr__2__8__5__key_0" ASC) AS - "aggr__2__8__5__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__2__parent_count", - "surname" AS "aggr__2__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__8__count", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS - "aggr__2__8__5__parent_count", - COALESCE("limbName", '__missing__') AS "aggr__2__8__5__key_0", - count(*) AS "aggr__2__8__5__count" - FROM __quesma_table_name - GROUP BY "surname" AS "aggr__2__key_0", - COALESCE("limbName", '__missing__') AS "aggr__2__8__5__key_0")) - WHERE ("aggr__2__order_1_rank"<=201 AND "aggr__2__8__5__order_1_rank"<=20) - ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__8__5__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `aggr__2__8__count`, `aggr__2__8__5__parent_count`, `aggr__2__8__5__key_0`,\n" + + " `aggr__2__8__5__count`\n" + + "FROM (\n" + + " SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `aggr__2__8__count`, `aggr__2__8__5__parent_count`, `aggr__2__8__5__key_0`,\n" + + " `aggr__2__8__5__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__2__count` DESC, `aggr__2__key_0` ASC) AS\n" + + " `aggr__2__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0` ORDER BY\n" + + " `aggr__2__8__5__count` DESC, `aggr__2__8__5__key_0` ASC) AS\n" + + " `aggr__2__8__5__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__2__parent_count`,\n" + + " `surname` AS `aggr__2__key_0`,\n" 
+ + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__8__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS\n" + + " `aggr__2__8__5__parent_count`,\n" + + " COALESCE(`limbName`, '__missing__') AS `aggr__2__8__5__key_0`,\n" + + " count(*) AS `aggr__2__8__5__count`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY `surname` AS `aggr__2__key_0`,\n" + + " COALESCE(`limbName`, '__missing__') AS `aggr__2__8__5__key_0`))\n" + + "WHERE (`aggr__2__order_1_rank`<=201 AND `aggr__2__8__5__order_1_rank`<=20)\n" + + "ORDER BY `aggr__2__order_1_rank` ASC, `aggr__2__8__5__order_1_rank` ASC", }, { // [53] TestName: "terms order by quantile, simplest - only one percentile", @@ -2452,33 +2356,29 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("metric__0__1__2_col_0", []float64{5}), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__0__key_0", "aggr__0__count", "aggr__0__1__parent_count", - "aggr__0__1__key_0", "aggr__0__1__count", "metric__0__1__2_col_0" - FROM ( - SELECT "aggr__0__key_0", "aggr__0__count", "aggr__0__1__parent_count", - "aggr__0__1__key_0", "aggr__0__1__count", "metric__0__1__2_col_0", - dense_rank() OVER (ORDER BY "aggr__0__key_0" ASC) AS "aggr__0__order_1_rank" - , - dense_rank() OVER (PARTITION BY "aggr__0__key_0" ORDER BY - "metric__0__1__2_col_0" DESC, "aggr__0__1__key_0" ASC) AS - "aggr__0__1__order_1_rank" - FROM ( - SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 43200000) AS - "aggr__0__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS "aggr__0__count", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS - "aggr__0__1__parent_count", "container.name" AS "aggr__0__1__key_0", - count(*) AS "aggr__0__1__count", - quantiles(0.750000)("docker.cpu.total.pct") AS "metric__0__1__2_col_0" - FROM __quesma_table_name - WHERE ("data_stream.dataset"='docker.cpu' AND ("@timestamp">= - 
fromUnixTimestamp64Milli(1723967652291) AND "@timestamp"<= - fromUnixTimestamp64Milli(1725263652291))) - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 43200000) AS - "aggr__0__key_0", "container.name" AS "aggr__0__1__key_0")) - WHERE "aggr__0__1__order_1_rank"<=6 - ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__1__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__0__key_0`, `aggr__0__count`, `aggr__0__1__parent_count`,\n" + + " `aggr__0__1__key_0`, `aggr__0__1__count`, `metric__0__1__2_col_0`\n" + + "FROM (\n" + + " SELECT `aggr__0__key_0`, `aggr__0__count`, `aggr__0__1__parent_count`,\n" + + " `aggr__0__1__key_0`, `aggr__0__1__count`, `metric__0__1__2_col_0`,\n" + + " dense_rank() OVER (ORDER BY `aggr__0__key_0` ASC) AS `aggr__0__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__0__key_0` ORDER BY\n" + + " `metric__0__1__2_col_0` DESC, `aggr__0__1__key_0` ASC) AS\n" + + " `aggr__0__1__order_1_rank`\n" + + " FROM (\n" + + " SELECT toInt64(toUnixTimestamp64Milli(`@timestamp`) / 43200000) AS\n" + + " `aggr__0__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS `aggr__0__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS\n" + + " `aggr__0__1__parent_count`, `container.name` AS `aggr__0__1__key_0`,\n" + + " count(*) AS `aggr__0__1__count`,\n" + + " quantiles(0.750000)(`docker.cpu.total.pct`) AS `metric__0__1__2_col_0`\n" + + " FROM `__quesma_table_name`\n" + + " WHERE (`data_stream.dataset`='docker.cpu' AND (`@timestamp`>=fromUnixTimestamp64Milli(1723967652291) AND `@timestamp`<=fromUnixTimestamp64Milli(1725263652291)))\n" + + " GROUP BY toInt64(toUnixTimestamp64Milli(`@timestamp`) / 43200000) AS\n" + + " `aggr__0__key_0`, `container.name` AS `aggr__0__1__key_0`))\n" + + "WHERE `aggr__0__1__order_1_rank`<=6\n" + + "ORDER BY `aggr__0__order_1_rank` ASC, `aggr__0__1__order_1_rank` ASC", }, { // [54] TestName: "terms order by quantile - more percentiles", @@ -2681,34 +2581,32 @@ var AggregationTests2 = 
[]AggregationTestCase{ model.NewQueryResultCol("metric__0__1__2_col_2", []float64{22.2}), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__0__key_0", "aggr__0__count", "aggr__0__1__parent_count", - "aggr__0__1__key_0", "aggr__0__1__count", "metric__0__1__2_col_0", - "metric__0__1__2_col_1", "metric__0__1__2_col_2" - FROM ( - SELECT "aggr__0__key_0", "aggr__0__count", "aggr__0__1__parent_count", - "aggr__0__1__key_0", "aggr__0__1__count", "metric__0__1__2_col_0", - "metric__0__1__2_col_1", "metric__0__1__2_col_2", - dense_rank() OVER (ORDER BY "aggr__0__key_0" ASC) AS "aggr__0__order_1_rank" - , - dense_rank() OVER (PARTITION BY "aggr__0__key_0" ORDER BY - "metric__0__1__2_col_1" DESC, "aggr__0__1__key_0" ASC) AS - "aggr__0__1__order_1_rank" - FROM ( - SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 43200000) AS - "aggr__0__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS "aggr__0__count", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS - "aggr__0__1__parent_count", "container.name" AS "aggr__0__1__key_0", - count(*) AS "aggr__0__1__count", - quantiles(0.100000)("docker.cpu.total.pct") AS "metric__0__1__2_col_0", - quantiles(0.750000)("docker.cpu.total.pct") AS "metric__0__1__2_col_1", - quantiles(0.990000)("docker.cpu.total.pct") AS "metric__0__1__2_col_2" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 43200000) AS - "aggr__0__key_0", "container.name" AS "aggr__0__1__key_0")) - WHERE "aggr__0__1__order_1_rank"<=6 - ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__1__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__0__key_0`, `aggr__0__count`, `aggr__0__1__parent_count`,\n" + + " `aggr__0__1__key_0`, `aggr__0__1__count`, `metric__0__1__2_col_0`,\n" + + " `metric__0__1__2_col_1`, `metric__0__1__2_col_2`\n" + + "FROM (\n" + + " SELECT `aggr__0__key_0`, `aggr__0__count`, `aggr__0__1__parent_count`,\n" + + " `aggr__0__1__key_0`, `aggr__0__1__count`, `metric__0__1__2_col_0`,\n" + + " 
`metric__0__1__2_col_1`, `metric__0__1__2_col_2`,\n" + + " dense_rank() OVER (ORDER BY `aggr__0__key_0` ASC) AS `aggr__0__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__0__key_0` ORDER BY\n" + + " `metric__0__1__2_col_1` DESC, `aggr__0__1__key_0` ASC) AS\n" + + " `aggr__0__1__order_1_rank`\n" + + " FROM (\n" + + " SELECT toInt64(toUnixTimestamp64Milli(`@timestamp`) / 43200000) AS\n" + + " `aggr__0__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS `aggr__0__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS\n" + + " `aggr__0__1__parent_count`, `container.name` AS `aggr__0__1__key_0`,\n" + + " count(*) AS `aggr__0__1__count`,\n" + + " quantiles(0.100000)(`docker.cpu.total.pct`) AS `metric__0__1__2_col_0`,\n" + + " quantiles(0.750000)(`docker.cpu.total.pct`) AS `metric__0__1__2_col_1`,\n" + + " quantiles(0.990000)(`docker.cpu.total.pct`) AS `metric__0__1__2_col_2`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY toInt64(toUnixTimestamp64Milli(`@timestamp`) / 43200000) AS\n" + + " `aggr__0__key_0`, `container.name` AS `aggr__0__1__key_0`))\n" + + "WHERE `aggr__0__1__order_1_rank`<=6\n" + + "ORDER BY `aggr__0__order_1_rank` ASC, `aggr__0__1__order_1_rank` ASC", }, { // [55] TestName: "terms order by percentile_ranks", @@ -2816,15 +2714,14 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("metric__0__1_col_1", 9.813812484840025), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "Cancelled" AS "aggr__0__key_0", count(*) AS "aggr__0__count", - countIf("DistanceKilometers"<=0)/count(*)*100 AS "metric__0__1_col_0", - countIf("DistanceKilometers"<=50)/count(*)*100 AS "metric__0__1_col_1" - FROM __quesma_table_name - GROUP BY "Cancelled" AS "aggr__0__key_0" - ORDER BY "metric__0__1_col_0" DESC, "aggr__0__key_0" ASC - LIMIT 6`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n" + + " `Cancelled` AS `aggr__0__key_0`, 
count(*) AS `aggr__0__count`,\n" + + " countIf(`DistanceKilometers`<=0)/count(*)*100 AS `metric__0__1_col_0`,\n" + + " countIf(`DistanceKilometers`<=50)/count(*)*100 AS `metric__0__1_col_1`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY `Cancelled` AS `aggr__0__key_0`\n" + + "ORDER BY `metric__0__1_col_0` DESC, `aggr__0__key_0` ASC\n" + + "LIMIT 6", }, { // [56] TestName: "simple histogram with null values, no missing parameter", @@ -2911,17 +2808,16 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__sample__histo__count", 1), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__sample__count", - floor("taxful_total_price"/224.19300000000004)*224.19300000000004 AS - "aggr__sample__histo__key_0", count(*) AS "aggr__sample__histo__count" - FROM ( - SELECT "taxful_total_price" - FROM __quesma_table_name - LIMIT 20000) - GROUP BY floor("taxful_total_price"/224.19300000000004)*224.19300000000004 AS - "aggr__sample__histo__key_0" - ORDER BY "aggr__sample__histo__key_0" ASC`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__sample__count`,\n" + + " floor(`taxful_total_price`/224.19300000000004)*224.19300000000004 AS\n" + + " `aggr__sample__histo__key_0`, count(*) AS `aggr__sample__histo__count`\n" + + "FROM (\n" + + " SELECT `taxful_total_price`\n" + + " FROM `__quesma_table_name`\n" + + " LIMIT 20000)\n" + + "GROUP BY floor(`taxful_total_price`/224.19300000000004)*224.19300000000004 AS\n" + + " `aggr__sample__histo__key_0`\n" + + "ORDER BY `aggr__sample__histo__key_0` ASC", }, { // [57] TestName: "histogram with null values, no missing parameter, and some subaggregation", @@ -3042,32 +2938,30 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__histo__0__count", int64(1)), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__histo__key_0", "aggr__histo__count", - "aggr__histo__0__parent_count", "aggr__histo__0__key_0", - "aggr__histo__0__count" - FROM ( - SELECT "aggr__histo__key_0", 
"aggr__histo__count", - "aggr__histo__0__parent_count", "aggr__histo__0__key_0", - "aggr__histo__0__count", - dense_rank() OVER (ORDER BY "aggr__histo__key_0" ASC) AS - "aggr__histo__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__histo__key_0" ORDER BY - "aggr__histo__0__count" DESC, "aggr__histo__0__key_0" ASC) AS - "aggr__histo__0__order_1_rank" - FROM ( - SELECT floor("taxful_total_price"/224.19300000000004)*224.19300000000004 AS - "aggr__histo__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__histo__key_0") AS - "aggr__histo__count", - sum(count(*)) OVER (PARTITION BY "aggr__histo__key_0") AS - "aggr__histo__0__parent_count", "type" AS "aggr__histo__0__key_0", - count(*) AS "aggr__histo__0__count" - FROM __quesma_table_name - GROUP BY floor("taxful_total_price"/224.19300000000004)*224.19300000000004 - AS "aggr__histo__key_0", "type" AS "aggr__histo__0__key_0")) - WHERE "aggr__histo__0__order_1_rank"<=11 - ORDER BY "aggr__histo__order_1_rank" ASC, "aggr__histo__0__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__histo__key_0`, `aggr__histo__count`,\n" + + " `aggr__histo__0__parent_count`, `aggr__histo__0__key_0`,\n" + + " `aggr__histo__0__count`\n" + + "FROM (\n" + + " SELECT `aggr__histo__key_0`, `aggr__histo__count`,\n" + + " `aggr__histo__0__parent_count`, `aggr__histo__0__key_0`,\n" + + " `aggr__histo__0__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__histo__key_0` ASC) AS\n" + + " `aggr__histo__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__histo__key_0` ORDER BY\n" + + " `aggr__histo__0__count` DESC, `aggr__histo__0__key_0` ASC) AS\n" + + " `aggr__histo__0__order_1_rank`\n" + + " FROM (\n" + + " SELECT floor(`taxful_total_price`/224.19300000000004)*224.19300000000004 AS\n" + + " `aggr__histo__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__histo__key_0`) AS `aggr__histo__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__histo__key_0`) AS\n" + + " `aggr__histo__0__parent_count`, `type` AS 
`aggr__histo__0__key_0`,\n" + + " count(*) AS `aggr__histo__0__count`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY floor(`taxful_total_price`/224.19300000000004)*224.19300000000004\n" + + " AS `aggr__histo__key_0`, `type` AS `aggr__histo__0__key_0`))\n" + + "WHERE `aggr__histo__0__order_1_rank`<=11\n" + + "ORDER BY `aggr__histo__order_1_rank` ASC, `aggr__histo__0__order_1_rank` ASC", }, { // [58] TestName: "simple histogram with null values and missing parameter", @@ -3159,18 +3053,17 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__sample__histo__count", 1), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__sample__count", - floor(COALESCE("taxful_total_price", 80)/224.19300000000004)* - 224.19300000000004 AS "aggr__sample__histo__key_0", - count(*) AS "aggr__sample__histo__count" - FROM ( - SELECT "taxful_total_price" - FROM __quesma_table_name - LIMIT 20000) - GROUP BY floor(COALESCE("taxful_total_price", 80)/224.19300000000004)* - 224.19300000000004 AS "aggr__sample__histo__key_0" - ORDER BY "aggr__sample__histo__key_0" ASC`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__sample__count`,\n" + + " floor(COALESCE(`taxful_total_price`, 80)/224.19300000000004)*\n" + + " 224.19300000000004 AS `aggr__sample__histo__key_0`,\n" + + " count(*) AS `aggr__sample__histo__count`\n" + + "FROM (\n" + + " SELECT `taxful_total_price`\n" + + " FROM `__quesma_table_name`\n" + + " LIMIT 20000)\n" + + "GROUP BY floor(COALESCE(`taxful_total_price`, 80)/224.19300000000004)*\n" + + " 224.19300000000004 AS `aggr__sample__histo__key_0`\n" + + "ORDER BY `aggr__sample__histo__key_0` ASC", }, { // [59] TestName: "histogram with null values, missing parameter, and some subaggregation", @@ -3310,33 +3203,31 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__histo__0__count", int64(1)), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__histo__key_0", "aggr__histo__count", - 
"aggr__histo__0__parent_count", "aggr__histo__0__key_0", - "aggr__histo__0__count" - FROM ( - SELECT "aggr__histo__key_0", "aggr__histo__count", - "aggr__histo__0__parent_count", "aggr__histo__0__key_0", - "aggr__histo__0__count", - dense_rank() OVER (ORDER BY "aggr__histo__key_0" ASC) AS - "aggr__histo__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__histo__key_0" ORDER BY - "aggr__histo__0__count" DESC, "aggr__histo__0__key_0" ASC) AS - "aggr__histo__0__order_1_rank" - FROM ( - SELECT floor(COALESCE("taxful_total_price", 800)/224.19300000000004)* - 224.19300000000004 AS "aggr__histo__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__histo__key_0") AS - "aggr__histo__count", - sum(count(*)) OVER (PARTITION BY "aggr__histo__key_0") AS - "aggr__histo__0__parent_count", "type" AS "aggr__histo__0__key_0", - count(*) AS "aggr__histo__0__count" - FROM __quesma_table_name - GROUP BY floor(COALESCE("taxful_total_price", 800)/224.19300000000004)* - 224.19300000000004 AS "aggr__histo__key_0", - "type" AS "aggr__histo__0__key_0")) - WHERE "aggr__histo__0__order_1_rank"<=11 - ORDER BY "aggr__histo__order_1_rank" ASC, "aggr__histo__0__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__histo__key_0`, `aggr__histo__count`,\n" + + " `aggr__histo__0__parent_count`, `aggr__histo__0__key_0`,\n" + + " `aggr__histo__0__count`\n" + + "FROM (\n" + + " SELECT `aggr__histo__key_0`, `aggr__histo__count`,\n" + + " `aggr__histo__0__parent_count`, `aggr__histo__0__key_0`,\n" + + " `aggr__histo__0__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__histo__key_0` ASC) AS\n" + + " `aggr__histo__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__histo__key_0` ORDER BY\n" + + " `aggr__histo__0__count` DESC, `aggr__histo__0__key_0` ASC) AS\n" + + " `aggr__histo__0__order_1_rank`\n" + + " FROM (\n" + + " SELECT floor(COALESCE(`taxful_total_price`, 800)/224.19300000000004)*\n" + + " 224.19300000000004 AS `aggr__histo__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY 
`aggr__histo__key_0`) AS `aggr__histo__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__histo__key_0`) AS\n" + + " `aggr__histo__0__parent_count`, `type` AS `aggr__histo__0__key_0`,\n" + + " count(*) AS `aggr__histo__0__count`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY floor(COALESCE(`taxful_total_price`, 800)/224.19300000000004)*\n" + + " 224.19300000000004 AS `aggr__histo__key_0`,\n" + + " `type` AS `aggr__histo__0__key_0`))\n" + + "WHERE `aggr__histo__0__order_1_rank`<=11\n" + + "ORDER BY `aggr__histo__order_1_rank` ASC, `aggr__histo__0__order_1_rank` ASC", }, { // [60] TestName: "simple date_histogram with null values, no missing parameter (DateTime)", @@ -3429,17 +3320,16 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__sample__histo__count", 1), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__sample__count", - toInt64(toUnixTimestamp("customer_birth_date") / 30) AS - "aggr__sample__histo__key_0", count(*) AS "aggr__sample__histo__count" - FROM ( - SELECT "customer_birth_date" - FROM __quesma_table_name - LIMIT 20000) - GROUP BY toInt64(toUnixTimestamp("customer_birth_date") / 30) AS - "aggr__sample__histo__key_0" - ORDER BY "aggr__sample__histo__key_0" ASC`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__sample__count`,\n" + + " toInt64(toUnixTimestamp(`customer_birth_date`) / 30) AS\n" + + " `aggr__sample__histo__key_0`, count(*) AS `aggr__sample__histo__count`\n" + + "FROM (\n" + + " SELECT `customer_birth_date`\n" + + " FROM `__quesma_table_name`\n" + + " LIMIT 20000)\n" + + "GROUP BY toInt64(toUnixTimestamp(`customer_birth_date`) / 30) AS\n" + + " `aggr__sample__histo__key_0`\n" + + "ORDER BY `aggr__sample__histo__key_0` ASC", }, { // [61] TestName: "date_histogram with null values, no missing parameter, and some subaggregation", @@ -3561,32 +3451,30 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__histo__0__count", int64(1)), }}, }, - 
ExpectedPancakeSQL: ` - SELECT "aggr__histo__key_0", "aggr__histo__count", - "aggr__histo__0__parent_count", "aggr__histo__0__key_0", - "aggr__histo__0__count" - FROM ( - SELECT "aggr__histo__key_0", "aggr__histo__count", - "aggr__histo__0__parent_count", "aggr__histo__0__key_0", - "aggr__histo__0__count", - dense_rank() OVER (ORDER BY "aggr__histo__key_0" ASC) AS - "aggr__histo__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__histo__key_0" ORDER BY - "aggr__histo__0__count" DESC, "aggr__histo__0__key_0" ASC) AS - "aggr__histo__0__order_1_rank" - FROM ( - SELECT toInt64(toUnixTimestamp64Milli("customer_birth_date_datetime64") / 30000) AS - "aggr__histo__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__histo__key_0") AS - "aggr__histo__count", - sum(count(*)) OVER (PARTITION BY "aggr__histo__key_0") AS - "aggr__histo__0__parent_count", "type" AS "aggr__histo__0__key_0", - count(*) AS "aggr__histo__0__count" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp64Milli("customer_birth_date_datetime64") / 30000) AS - "aggr__histo__key_0", "type" AS "aggr__histo__0__key_0")) - WHERE "aggr__histo__0__order_1_rank"<=11 - ORDER BY "aggr__histo__order_1_rank" ASC, "aggr__histo__0__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__histo__key_0`, `aggr__histo__count`,\n" + + " `aggr__histo__0__parent_count`, `aggr__histo__0__key_0`,\n" + + " `aggr__histo__0__count`\n" + + "FROM (\n" + + " SELECT `aggr__histo__key_0`, `aggr__histo__count`,\n" + + " `aggr__histo__0__parent_count`, `aggr__histo__0__key_0`,\n" + + " `aggr__histo__0__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__histo__key_0` ASC) AS\n" + + " `aggr__histo__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__histo__key_0` ORDER BY\n" + + " `aggr__histo__0__count` DESC, `aggr__histo__0__key_0` ASC) AS\n" + + " `aggr__histo__0__order_1_rank`\n" + + " FROM (\n" + + " SELECT toInt64(toUnixTimestamp64Milli(`customer_birth_date_datetime64`) / 30000) AS\n" + + " 
`aggr__histo__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__histo__key_0`) AS `aggr__histo__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__histo__key_0`) AS\n" + + " `aggr__histo__0__parent_count`, `type` AS `aggr__histo__0__key_0`,\n" + + " count(*) AS `aggr__histo__0__count`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY toInt64(toUnixTimestamp64Milli(`customer_birth_date_datetime64`) / 30000) AS\n" + + " `aggr__histo__key_0`, `type` AS `aggr__histo__0__key_0`))\n" + + "WHERE `aggr__histo__0__order_1_rank`<=11\n" + + "ORDER BY `aggr__histo__order_1_rank` ASC, `aggr__histo__0__order_1_rank` ASC", }, { // [62] TestName: "date_histogram with null values, missing parameter (DateTime, not DateTime64), and some subaggregation", @@ -3744,33 +3632,31 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__histo__0__count", int64(1)), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__histo__key_0", "aggr__histo__count", - "aggr__histo__0__parent_count", "aggr__histo__0__key_0", - "aggr__histo__0__count" - FROM ( - SELECT "aggr__histo__key_0", "aggr__histo__count", - "aggr__histo__0__parent_count", "aggr__histo__0__key_0", - "aggr__histo__0__count", - dense_rank() OVER (ORDER BY "aggr__histo__key_0" ASC) AS - "aggr__histo__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__histo__key_0" ORDER BY - "aggr__histo__0__count" DESC, "aggr__histo__0__key_0" ASC) AS - "aggr__histo__0__order_1_rank" - FROM ( - SELECT toInt64(toUnixTimestamp(COALESCE("customer_birth_date", - fromUnixTimestamp(1706021760))) / 30) AS "aggr__histo__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__histo__key_0") AS - "aggr__histo__count", - sum(count(*)) OVER (PARTITION BY "aggr__histo__key_0") AS - "aggr__histo__0__parent_count", "type" AS "aggr__histo__0__key_0", - count(*) AS "aggr__histo__0__count" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp(COALESCE("customer_birth_date", - fromUnixTimestamp(1706021760))) / 30) AS 
"aggr__histo__key_0", - "type" AS "aggr__histo__0__key_0")) - WHERE "aggr__histo__0__order_1_rank"<=11 - ORDER BY "aggr__histo__order_1_rank" ASC, "aggr__histo__0__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__histo__key_0`, `aggr__histo__count`,\n" + + " `aggr__histo__0__parent_count`, `aggr__histo__0__key_0`,\n" + + " `aggr__histo__0__count`\n" + + "FROM (\n" + + " SELECT `aggr__histo__key_0`, `aggr__histo__count`,\n" + + " `aggr__histo__0__parent_count`, `aggr__histo__0__key_0`,\n" + + " `aggr__histo__0__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__histo__key_0` ASC) AS\n" + + " `aggr__histo__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__histo__key_0` ORDER BY\n" + + " `aggr__histo__0__count` DESC, `aggr__histo__0__key_0` ASC) AS\n" + + " `aggr__histo__0__order_1_rank`\n" + + " FROM (\n" + + " SELECT toInt64(toUnixTimestamp(COALESCE(`customer_birth_date`,\n" + + " fromUnixTimestamp(1706021760))) / 30) AS `aggr__histo__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__histo__key_0`) AS `aggr__histo__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__histo__key_0`) AS\n" + + " `aggr__histo__0__parent_count`, `type` AS `aggr__histo__0__key_0`,\n" + + " count(*) AS `aggr__histo__0__count`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY toInt64(toUnixTimestamp(COALESCE(`customer_birth_date`,\n" + + " fromUnixTimestamp(1706021760))) / 30) AS `aggr__histo__key_0`,\n" + + " `type` AS `aggr__histo__0__key_0`))\n" + + "WHERE `aggr__histo__0__order_1_rank`<=11\n" + + "ORDER BY `aggr__histo__order_1_rank` ASC, `aggr__histo__0__order_1_rank` ASC", }, { // [63] TestName: "date_histogram with missing, different formats, and types (DateTime/DateTime64)", @@ -3907,43 +3793,42 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__histo5__count", int64(4675)), }}}, }, - ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp(COALESCE("customer_birth_date", - fromUnixTimestamp(1706878800))) / 90) AS 
"aggr__histo1__key_0", - count(*) AS "aggr__histo1__count" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp(COALESCE("customer_birth_date", - fromUnixTimestamp(1706878800))) / 90) AS "aggr__histo1__key_0" - ORDER BY "aggr__histo1__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64(toUnixTimestamp(COALESCE(`customer_birth_date`,\n" + + " fromUnixTimestamp(1706878800))) / 90) AS `aggr__histo1__key_0`,\n" + + " count(*) AS `aggr__histo1__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY toInt64(toUnixTimestamp(COALESCE(`customer_birth_date`,\n" + + " fromUnixTimestamp(1706878800))) / 90) AS `aggr__histo1__key_0`\n" + + "ORDER BY `aggr__histo1__key_0` ASC", ExpectedAdditionalPancakeSQLs: []string{ - `SELECT toInt64(toUnixTimestamp(COALESCE("customer_birth_date", - fromUnixTimestamp(1706878800))) / 90) AS "aggr__histo2__key_0", - count(*) AS "aggr__histo2__count" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp(COALESCE("customer_birth_date", - fromUnixTimestamp(1706878800))) / 90) AS "aggr__histo2__key_0" - ORDER BY "aggr__histo2__key_0" ASC`, - `SELECT toInt64(toUnixTimestamp64Milli(COALESCE("customer_birth_date_datetime64", - fromUnixTimestamp64Milli(1706878800000))) / 90000) AS "aggr__histo3__key_0", - count(*) AS "aggr__histo3__count" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp64Milli(COALESCE("customer_birth_date_datetime64", - fromUnixTimestamp64Milli(1706878800000))) / 90000) AS "aggr__histo3__key_0" - ORDER BY "aggr__histo3__key_0" ASC`, - `SELECT toInt64(toUnixTimestamp64Milli(COALESCE("customer_birth_date_datetime64", - fromUnixTimestamp64Milli(1706853600000))) / 90000) AS "aggr__histo4__key_0", - count(*) AS "aggr__histo4__count" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp64Milli(COALESCE("customer_birth_date_datetime64", - fromUnixTimestamp64Milli(1706853600000))) / 90000) AS "aggr__histo4__key_0" - ORDER BY "aggr__histo4__key_0" ASC`, - `SELECT 
toInt64(toUnixTimestamp(COALESCE("customer_birth_date", - fromUnixTimestamp(1706853600))) / 90) AS "aggr__histo5__key_0", - count(*) AS "aggr__histo5__count" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp(COALESCE("customer_birth_date", - fromUnixTimestamp(1706853600))) / 90) AS "aggr__histo5__key_0" - ORDER BY "aggr__histo5__key_0" ASC`, + "SELECT toInt64(toUnixTimestamp(COALESCE(`customer_birth_date`,\n" + + " fromUnixTimestamp(1706878800))) / 90) AS `aggr__histo2__key_0`,\n" + + " count(*) AS `aggr__histo2__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY toInt64(toUnixTimestamp(COALESCE(`customer_birth_date`,\n" + + " fromUnixTimestamp(1706878800))) / 90) AS `aggr__histo2__key_0`\n" + + "ORDER BY `aggr__histo2__key_0` ASC", + "SELECT toInt64(toUnixTimestamp64Milli(COALESCE(`customer_birth_date_datetime64`,\n" + + " fromUnixTimestamp64Milli(1706878800000))) / 90000) AS `aggr__histo3__key_0`,\n" + + " count(*) AS `aggr__histo3__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY toInt64(toUnixTimestamp64Milli(COALESCE(`customer_birth_date_datetime64`,\n" + + " fromUnixTimestamp64Milli(1706878800000))) / 90000) AS `aggr__histo3__key_0`\n" + + "ORDER BY `aggr__histo3__key_0` ASC", + "SELECT toInt64(toUnixTimestamp64Milli(COALESCE(`customer_birth_date_datetime64`,\n" + + " fromUnixTimestamp64Milli(1706853600000))) / 90000) AS `aggr__histo4__key_0`,\n" + + " count(*) AS `aggr__histo4__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY toInt64(toUnixTimestamp64Milli(COALESCE(`customer_birth_date_datetime64`,\n" + + " fromUnixTimestamp64Milli(1706853600000))) / 90000) AS `aggr__histo4__key_0`\n" + + "ORDER BY `aggr__histo4__key_0` ASC", + "SELECT toInt64(toUnixTimestamp(COALESCE(`customer_birth_date`,\n" + + " fromUnixTimestamp(1706853600))) / 90) AS `aggr__histo5__key_0`,\n" + + " count(*) AS `aggr__histo5__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY toInt64(toUnixTimestamp(COALESCE(`customer_birth_date`,\n" + + " 
fromUnixTimestamp(1706853600))) / 90) AS `aggr__histo5__key_0`\n" + + "ORDER BY `aggr__histo5__key_0` ASC", }, }, { // [64] @@ -4101,28 +3986,27 @@ var AggregationTests2 = []AggregationTestCase{ }}, }, }, - ExpectedPancakeSQL: ` - SELECT floor("total_quantity"/0)*0 AS "aggr__interval-0__key_0", - count(*) AS "aggr__interval-0__count" - FROM __quesma_table_name - GROUP BY floor("total_quantity"/0)*0 AS "aggr__interval-0__key_0" - ORDER BY "aggr__interval-0__key_0" ASC`, + ExpectedPancakeSQL: "SELECT floor(`total_quantity`/0)*0 AS `aggr__interval-0__key_0`,\n" + + " count(*) AS `aggr__interval-0__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY floor(`total_quantity`/0)*0 AS `aggr__interval-0__key_0`\n" + + "ORDER BY `aggr__interval-0__key_0` ASC", ExpectedAdditionalPancakeSQLs: []string{ - `SELECT floor("total_quantity"/0.5)*0.5 AS "aggr__interval-0.5__key_0", - count(*) AS "aggr__interval-0.5__count" - FROM __quesma_table_name - GROUP BY floor("total_quantity"/0.5)*0.5 AS "aggr__interval-0.5__key_0" - ORDER BY "aggr__interval-0.5__key_0" ASC`, - `SELECT "total_quantity" AS "aggr__interval-1__key_0", - count(*) AS "aggr__interval-1__count" - FROM __quesma_table_name - GROUP BY "total_quantity" AS "aggr__interval-1__key_0" - ORDER BY "aggr__interval-1__key_0" ASC`, - `SELECT floor("total_quantity"/2)*2 AS "aggr__interval-2__key_0", - count(*) AS "aggr__interval-2__count" - FROM __quesma_table_name - GROUP BY floor("total_quantity"/2)*2 AS "aggr__interval-2__key_0" - ORDER BY "aggr__interval-2__key_0" ASC`, + "SELECT floor(`total_quantity`/0.5)*0.5 AS `aggr__interval-0.5__key_0`,\n" + + " count(*) AS `aggr__interval-0.5__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY floor(`total_quantity`/0.5)*0.5 AS `aggr__interval-0.5__key_0`\n" + + "ORDER BY `aggr__interval-0.5__key_0` ASC", + "SELECT `total_quantity` AS `aggr__interval-1__key_0`,\n" + + " count(*) AS `aggr__interval-1__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY 
`total_quantity` AS `aggr__interval-1__key_0`\n" + + "ORDER BY `aggr__interval-1__key_0` ASC", + "SELECT floor(`total_quantity`/2)*2 AS `aggr__interval-2__key_0`,\n" + + " count(*) AS `aggr__interval-2__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY floor(`total_quantity`/2)*2 AS `aggr__interval-2__key_0`\n" + + "ORDER BY `aggr__interval-2__key_0` ASC", }, }, { // [65] @@ -4280,13 +4164,12 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__my_buckets__count", int64(8)), }}, }, - ExpectedPancakeSQL: ` - SELECT "product" AS "aggr__my_buckets__key_0", - count(*) AS "aggr__my_buckets__count" - FROM __quesma_table_name - GROUP BY "product" AS "aggr__my_buckets__key_0" - ORDER BY "aggr__my_buckets__count" DESC, "aggr__my_buckets__key_0" ASC - LIMIT 11`, + ExpectedPancakeSQL: "SELECT `product` AS `aggr__my_buckets__key_0`,\n" + + " count(*) AS `aggr__my_buckets__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY `product` AS `aggr__my_buckets__key_0`\n" + + "ORDER BY `aggr__my_buckets__count` DESC, `aggr__my_buckets__key_0` ASC\n" + + "LIMIT 11", }, { // [66] TestName: "simplest composite: 1 histogram (with size)", @@ -4375,13 +4258,12 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__my_buckets__count", 100000000), }}, }, - ExpectedPancakeSQL: ` - SELECT floor("price"/5)*5 AS "aggr__my_buckets__key_0", - count(*) AS "aggr__my_buckets__count" - FROM __quesma_table_name - GROUP BY floor("price"/5)*5 AS "aggr__my_buckets__key_0" - ORDER BY "aggr__my_buckets__key_0" ASC - LIMIT 4`, + ExpectedPancakeSQL: "SELECT floor(`price`/5)*5 AS `aggr__my_buckets__key_0`,\n" + + " count(*) AS `aggr__my_buckets__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY floor(`price`/5)*5 AS `aggr__my_buckets__key_0`\n" + + "ORDER BY `aggr__my_buckets__key_0` ASC\n" + + "LIMIT 4", }, { // [67] TestName: "simplest composite: 1 date_histogram", @@ -4460,14 +4342,13 @@ var AggregationTests2 = 
[]AggregationTestCase{ model.NewQueryResultCol("aggr__my_buckets__count", int64(567)), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp64Milli("timestamp") / 86400000) AS - "aggr__my_buckets__key_0", count(*) AS "aggr__my_buckets__count" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp64Milli("timestamp") / 86400000) AS - "aggr__my_buckets__key_0" - ORDER BY "aggr__my_buckets__key_0" ASC - LIMIT 3`, + ExpectedPancakeSQL: "SELECT toInt64(toUnixTimestamp64Milli(`timestamp`) / 86400000) AS\n" + + " `aggr__my_buckets__key_0`, count(*) AS `aggr__my_buckets__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY toInt64(toUnixTimestamp64Milli(`timestamp`) / 86400000) AS\n" + + " `aggr__my_buckets__key_0`\n" + + "ORDER BY `aggr__my_buckets__key_0` ASC\n" + + "LIMIT 3", }, { // [68] TestName: "simplest composite: 1 geotile_grid", @@ -4554,21 +4435,20 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__my_buckets__count", int64(1)), }}, }, - ExpectedPancakeSQL: ` - SELECT FLOOR(((__quesma_geo_lon("OriginLocation")+180)/360)*POWER(2, 8)) - AS "aggr__my_buckets__key_0", - FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat("OriginLocation")))+(1/COS(RADIANS( - __quesma_geo_lat("OriginLocation")))))/PI())/2*POWER(2, 8)) - AS "aggr__my_buckets__key_1", count(*) AS "aggr__my_buckets__count" - FROM __quesma_table_name - GROUP BY FLOOR(((__quesma_geo_lon("OriginLocation")+180)/360)*POWER(2, 8)) - AS "aggr__my_buckets__key_0", - FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat("OriginLocation")))+(1/COS(RADIANS( - __quesma_geo_lat("OriginLocation")))))/PI())/2*POWER(2, 8)) - AS "aggr__my_buckets__key_1" - ORDER BY "aggr__my_buckets__count" DESC, "aggr__my_buckets__key_0" ASC, - "aggr__my_buckets__key_1" ASC - LIMIT 10`, + ExpectedPancakeSQL: "SELECT FLOOR(((__quesma_geo_lon(`OriginLocation`)+180)/360)*POWER(2, 8))\n" + + " AS `aggr__my_buckets__key_0`,\n" + + " 
FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat(`OriginLocation`)))+(1/COS(RADIANS(\n" + + " __quesma_geo_lat(`OriginLocation`)))))/PI())/2*POWER(2, 8))\n" + + " AS `aggr__my_buckets__key_1`, count(*) AS `aggr__my_buckets__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY FLOOR(((__quesma_geo_lon(`OriginLocation`)+180)/360)*POWER(2, 8))\n" + + " AS `aggr__my_buckets__key_0`,\n" + + " FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat(`OriginLocation`)))+(1/COS(RADIANS(\n" + + " __quesma_geo_lat(`OriginLocation`)))))/PI())/2*POWER(2, 8))\n" + + " AS `aggr__my_buckets__key_1`\n" + + "ORDER BY `aggr__my_buckets__count` DESC, `aggr__my_buckets__key_0` ASC,\n" + + " `aggr__my_buckets__key_1` ASC\n" + + "LIMIT 10", }, { // [69] TestName: "composite: 2 sources + 1 subaggregation", @@ -4693,18 +4573,17 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("metric__my_buckets__the_avg_col_0", 100000000), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp64Milli("timestamp") / 86400000) AS - "aggr__my_buckets__key_0", "product" AS "aggr__my_buckets__key_1", - count(*) AS "aggr__my_buckets__count", - avgOrNull("price") AS "metric__my_buckets__the_avg_col_0" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp64Milli("timestamp") / 86400000) AS - "aggr__my_buckets__key_0", - "product" AS "aggr__my_buckets__key_1" - ORDER BY "aggr__my_buckets__count" DESC, "aggr__my_buckets__key_0" ASC, - "aggr__my_buckets__key_1" ASC - LIMIT 4`, + ExpectedPancakeSQL: "SELECT toInt64(toUnixTimestamp64Milli(`timestamp`) / 86400000) AS\n" + + " `aggr__my_buckets__key_0`, `product` AS `aggr__my_buckets__key_1`,\n" + + " count(*) AS `aggr__my_buckets__count`,\n" + + " avgOrNull(`price`) AS `metric__my_buckets__the_avg_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY toInt64(toUnixTimestamp64Milli(`timestamp`) / 86400000) AS\n" + + " `aggr__my_buckets__key_0`,\n" + + " `product` AS `aggr__my_buckets__key_1`\n" + + "ORDER BY `aggr__my_buckets__count` 
DESC, `aggr__my_buckets__key_0` ASC,\n" + + " `aggr__my_buckets__key_1` ASC\n" + + "LIMIT 4", }, { // [70] TestName: "simplest terms with exclude (array of values)", @@ -4754,14 +4633,13 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__1__count", int64(3261)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__1__parent_count", - if("chess_goat" NOT IN tuple('Carlsen', 'Kasparov', 'Fis._er\'*'), "chess_goat", NULL) - AS "aggr__1__key_0", count(*) AS "aggr__1__count" - FROM __quesma_table_name - GROUP BY if("chess_goat" NOT IN tuple('Carlsen', 'Kasparov', 'Fis._er\'*'), "chess_goat", NULL) AS "aggr__1__key_0" - ORDER BY "aggr__1__count" DESC, "aggr__1__key_0" ASC - LIMIT 3`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__1__parent_count`,\n" + + " if(`chess_goat` NOT IN tuple('Carlsen', 'Kasparov', 'Fis._er\\'*'), `chess_goat`, NULL)\n" + + " AS `aggr__1__key_0`, count(*) AS `aggr__1__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY if(`chess_goat` NOT IN tuple('Carlsen', 'Kasparov', 'Fis._er\\'*'), `chess_goat`, NULL) AS `aggr__1__key_0`\n" + + "ORDER BY `aggr__1__count` DESC, `aggr__1__key_0` ASC\n" + + "LIMIT 3", }, { // [71] TestName: "simplest terms with exclude (single value, no regex)", @@ -4807,14 +4685,13 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__1__count", int64(3300)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__1__parent_count", - if("agi_birth_year"!=2025, "agi_birth_year", NULL) AS "aggr__1__key_0", - count(*) AS "aggr__1__count" - FROM __quesma_table_name - GROUP BY if("agi_birth_year"!=2025, "agi_birth_year", NULL) AS "aggr__1__key_0" - ORDER BY "aggr__1__count" DESC, "aggr__1__key_0" ASC - LIMIT 2`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__1__parent_count`,\n" + + " if(`agi_birth_year`!=2025, `agi_birth_year`, NULL) AS `aggr__1__key_0`,\n" + + " count(*) AS `aggr__1__count`\n" + + "FROM 
`__quesma_table_name`\n" + + "GROUP BY if(`agi_birth_year`!=2025, `agi_birth_year`, NULL) AS `aggr__1__key_0`\n" + + "ORDER BY `aggr__1__count` DESC, `aggr__1__key_0` ASC\n" + + "LIMIT 2", }, { // [72] TestName: "simplest terms with exclude (empty array)", @@ -4860,13 +4737,12 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__1__count", int64(300)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__1__parent_count", - "agi_birth_year" AS "aggr__1__key_0", count(*) AS "aggr__1__count" - FROM __quesma_table_name - GROUP BY "agi_birth_year" AS "aggr__1__key_0" - ORDER BY "aggr__1__count" DESC, "aggr__1__key_0" ASC - LIMIT 2`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__1__parent_count`,\n" + + " `agi_birth_year` AS `aggr__1__key_0`, count(*) AS `aggr__1__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY `agi_birth_year` AS `aggr__1__key_0`\n" + + "ORDER BY `aggr__1__count` DESC, `aggr__1__key_0` ASC\n" + + "LIMIT 2", }, { // [73] TestName: "simplest terms with exclude (of strings), regression test", @@ -4912,14 +4788,13 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__1__count", int64(300)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__1__parent_count", - if("chess_goat" NOT IN 'abc', "chess_goat", NULL) AS "aggr__1__key_0", - count(*) AS "aggr__1__count" - FROM __quesma_table_name - GROUP BY if("chess_goat" NOT IN 'abc', "chess_goat", NULL) AS "aggr__1__key_0" - ORDER BY "aggr__1__count" DESC, "aggr__1__key_0" ASC - LIMIT 2`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__1__parent_count`,\n" + + " if(`chess_goat` NOT IN 'abc', `chess_goat`, NULL) AS `aggr__1__key_0`,\n" + + " count(*) AS `aggr__1__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY if(`chess_goat` NOT IN 'abc', `chess_goat`, NULL) AS `aggr__1__key_0`\n" + + "ORDER BY `aggr__1__count` DESC, `aggr__1__key_0` ASC\n" + + "LIMIT 2", }, { // [74] 
TestName: "terms with exclude (more complex, string field with exclude regex)", @@ -4966,14 +4841,13 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__1__count", int64(1)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__1__parent_count", - if("chess_goat" NOT LIKE 'K%', "chess_goat", NULL) AS "aggr__1__key_0", - count(*) AS "aggr__1__count" - FROM __quesma_table_name - GROUP BY if("chess_goat" NOT LIKE 'K%', "chess_goat", NULL) AS "aggr__1__key_0" - ORDER BY "aggr__1__count" DESC, "aggr__1__key_0" ASC - LIMIT 2`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__1__parent_count`,\n" + + " if(`chess_goat` NOT LIKE 'K%', `chess_goat`, NULL) AS `aggr__1__key_0`,\n" + + " count(*) AS `aggr__1__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY if(`chess_goat` NOT LIKE 'K%', `chess_goat`, NULL) AS `aggr__1__key_0`\n" + + "ORDER BY `aggr__1__count` DESC, `aggr__1__key_0` ASC\n" + + "LIMIT 2", }, { // [75] TestName: "complex terms with exclude: nested terms + 2 metrics", @@ -5118,40 +4992,38 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("metric__terms1__terms2__metric2_col_0", 99314.3501429406), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__terms1__parent_count", "aggr__terms1__key_0", - "aggr__terms1__count", "metric__terms1__metric1_col_0", - "aggr__terms1__terms2__parent_count", "aggr__terms1__terms2__key_0", - "aggr__terms1__terms2__count", "metric__terms1__terms2__metric2_col_0" - FROM ( - SELECT "aggr__terms1__parent_count", "aggr__terms1__key_0", - "aggr__terms1__count", "metric__terms1__metric1_col_0", - "aggr__terms1__terms2__parent_count", "aggr__terms1__terms2__key_0", - "aggr__terms1__terms2__count", "metric__terms1__terms2__metric2_col_0", - dense_rank() OVER (ORDER BY "aggr__terms1__count" DESC, - "aggr__terms1__key_0" ASC) AS "aggr__terms1__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__terms1__key_0" ORDER BY - "aggr__terms1__terms2__count" 
DESC, "aggr__terms1__terms2__key_0" ASC) AS - "aggr__terms1__terms2__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__terms1__parent_count", - if("Carrier" NOT IN tuple('a', 'b'), "Carrier", NULL) AS "aggr__terms1__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__terms1__key_0") AS - "aggr__terms1__count", - avgOrNullMerge(avgOrNullState("DistanceMiles")) OVER (PARTITION BY - "aggr__terms1__key_0") AS "metric__terms1__metric1_col_0", - sum(count(*)) OVER (PARTITION BY "aggr__terms1__key_0") AS - "aggr__terms1__terms2__parent_count", - "DestCityName" AS "aggr__terms1__terms2__key_0", - count(*) AS "aggr__terms1__terms2__count", - sumOrNull("AvgTicketPrice") AS "metric__terms1__terms2__metric2_col_0" - FROM __quesma_table_name - GROUP BY if("Carrier" NOT IN tuple('a', 'b'), "Carrier", NULL) AS - "aggr__terms1__key_0", "DestCityName" AS "aggr__terms1__terms2__key_0")) - WHERE ("aggr__terms1__order_1_rank"<=3 AND "aggr__terms1__terms2__order_1_rank" - <=2) - ORDER BY "aggr__terms1__order_1_rank" ASC, - "aggr__terms1__terms2__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__terms1__parent_count`, `aggr__terms1__key_0`,\n" + + " `aggr__terms1__count`, `metric__terms1__metric1_col_0`,\n" + + " `aggr__terms1__terms2__parent_count`, `aggr__terms1__terms2__key_0`,\n" + + " `aggr__terms1__terms2__count`, `metric__terms1__terms2__metric2_col_0`\n" + + "FROM (\n" + + " SELECT `aggr__terms1__parent_count`, `aggr__terms1__key_0`,\n" + + " `aggr__terms1__count`, `metric__terms1__metric1_col_0`,\n" + + " `aggr__terms1__terms2__parent_count`, `aggr__terms1__terms2__key_0`,\n" + + " `aggr__terms1__terms2__count`, `metric__terms1__terms2__metric2_col_0`,\n" + + " dense_rank() OVER (ORDER BY `aggr__terms1__count` DESC,\n" + + " `aggr__terms1__key_0` ASC) AS `aggr__terms1__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__terms1__key_0` ORDER BY\n" + + " `aggr__terms1__terms2__count` DESC, `aggr__terms1__terms2__key_0` ASC) AS\n" + + " 
`aggr__terms1__terms2__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__terms1__parent_count`,\n" + + " if(`Carrier` NOT IN tuple('a', 'b'), `Carrier`, NULL) AS `aggr__terms1__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__terms1__key_0`) AS\n" + + " `aggr__terms1__count`,\n" + + " avgOrNullMerge(avgOrNullState(`DistanceMiles`)) OVER (PARTITION BY\n" + + " `aggr__terms1__key_0`) AS `metric__terms1__metric1_col_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__terms1__key_0`) AS\n" + + " `aggr__terms1__terms2__parent_count`,\n" + + " `DestCityName` AS `aggr__terms1__terms2__key_0`,\n" + + " count(*) AS `aggr__terms1__terms2__count`,\n" + + " sumOrNull(`AvgTicketPrice`) AS `metric__terms1__terms2__metric2_col_0`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY if(`Carrier` NOT IN tuple('a', 'b'), `Carrier`, NULL) AS\n" + + " `aggr__terms1__key_0`, `DestCityName` AS `aggr__terms1__terms2__key_0`))\n" + + "WHERE (`aggr__terms1__order_1_rank`<=3 AND `aggr__terms1__terms2__order_1_rank`<=2)\n" + + "ORDER BY `aggr__terms1__order_1_rank` ASC,\n" + + " `aggr__terms1__terms2__order_1_rank` ASC", }, { // [76] TestName: "terms with exclude, but with branched off aggregation tree", @@ -5265,16 +5137,15 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("metric__terms1__metric1_col_0", 6.20), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__terms1__parent_count", - if("Carrier" NOT IN tuple('a', 'b'), "Carrier", NULL) AS "aggr__terms1__key_0" - , count(*) AS "aggr__terms1__count", - avgOrNull("DistanceMiles") AS "metric__terms1__metric1_col_0" - FROM __quesma_table_name - GROUP BY if("Carrier" NOT IN tuple('a', 'b'), "Carrier", NULL) AS - "aggr__terms1__key_0" - ORDER BY "aggr__terms1__count" DESC, "aggr__terms1__key_0" ASC - LIMIT 2`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__terms1__parent_count`,\n" + + " if(`Carrier` NOT IN tuple('a', 'b'), `Carrier`, NULL) AS 
`aggr__terms1__key_0`,\n" + + " count(*) AS `aggr__terms1__count`,\n" + + " avgOrNull(`DistanceMiles`) AS `metric__terms1__metric1_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY if(`Carrier` NOT IN tuple('a', 'b'), `Carrier`, NULL) AS\n" + + " `aggr__terms1__key_0`\n" + + "ORDER BY `aggr__terms1__count` DESC, `aggr__terms1__key_0` ASC\n" + + "LIMIT 2", ExpectedAdditionalPancakeResults: [][]model.QueryResultRow{{ {Cols: []model.QueryResultCol{ model.NewQueryResultCol("aggr__terms2__parent_count", int64(13014)), @@ -5295,16 +5166,17 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("metric__terms2__metric1_col_0", 42), }}, }}, - ExpectedAdditionalPancakeSQLs: []string{` - SELECT sum(count(*)) OVER () AS "aggr__terms2__parent_count", - if("Carrier" NOT IN tuple('Logstash Airways', '.*'), "Carrier", NULL) AS - "aggr__terms2__key_0", count(*) AS "aggr__terms2__count", - avgOrNull("DistanceMiles") AS "metric__terms2__metric1_col_0" - FROM __quesma_table_name - GROUP BY if("Carrier" NOT IN tuple('Logstash Airways', '.*'), "Carrier", NULL) - AS "aggr__terms2__key_0" - ORDER BY "aggr__terms2__count" DESC, "aggr__terms2__key_0" ASC - LIMIT 3`}, + ExpectedAdditionalPancakeSQLs: []string{ + "SELECT sum(count(*)) OVER () AS `aggr__terms2__parent_count`,\n" + + " if(`Carrier` NOT IN tuple('Logstash Airways', '.*'), `Carrier`, NULL) AS\n" + + " `aggr__terms2__key_0`, count(*) AS `aggr__terms2__count`,\n" + + " avgOrNull(`DistanceMiles`) AS `metric__terms2__metric1_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY if(`Carrier` NOT IN tuple('Logstash Airways', '.*'), `Carrier`, NULL)\n" + + " AS `aggr__terms2__key_0`\n" + + "ORDER BY `aggr__terms2__count` DESC, `aggr__terms2__key_0` ASC\n" + + "LIMIT 3", + }, }, { // [77] TestName: "terms with bool field", @@ -5360,13 +5232,12 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__terms__count", int64(2)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) 
OVER () AS "aggr__terms__parent_count", - "Cancelled" AS "aggr__terms__key_0", count(*) AS "aggr__terms__count" - FROM __quesma_table_name - GROUP BY "Cancelled" AS "aggr__terms__key_0" - ORDER BY "aggr__terms__count" DESC, "aggr__terms__key_0" ASC - LIMIT 3`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__terms__parent_count`,\n" + + " `Cancelled` AS `aggr__terms__key_0`, count(*) AS `aggr__terms__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY `Cancelled` AS `aggr__terms__key_0`\n" + + "ORDER BY `aggr__terms__count` DESC, `aggr__terms__key_0` ASC\n" + + "LIMIT 3", }, { // [78] TestName: `Escaping of ', \, \n, and \t in some example aggregations. No tests for other escape characters, e.g. \r or 'b. Add if needed.`, @@ -5426,17 +5297,13 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__terms__count", int64(5362)), }}, }, - ExpectedPancakeSQL: ` - SELECT avgOrNullMerge(avgOrNullState("@timestamp's\\")) OVER () AS - "metric__avg_col_0", sum(count(*)) OVER () AS "aggr__terms__parent_count", - COALESCE("agent", 'quote \' and slash \\ Also -') AS "aggr__terms__key_0", - count(*) AS "aggr__terms__count" - FROM __quesma_table_name - GROUP BY COALESCE("agent", 'quote \' and slash \\ Also -') AS - "aggr__terms__key_0" - ORDER BY "aggr__terms__count" DESC, "aggr__terms__key_0" ASC - LIMIT 1`, + ExpectedPancakeSQL: "SELECT avgOrNullMerge(avgOrNullState(`@timestamp's\\`)) OVER () AS `metric__avg_col_0`, sum(count(*)) OVER () AS `aggr__terms__parent_count`, COALESCE(`agent`,'quote \\' and slash \\\\ Also\n') " + + "AS `aggr__terms__key_0`, count(*) AS `aggr__terms__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY COALESCE(`agent`, 'quote \\' and slash \\\\ Also\n" + + "') AS `\n" + + "aggr__terms__key_0` ORDER BY `aggr__terms__count` DESC, `aggr__terms__key_0`\n" + + " ASC \n" + + "LIMIT 1", }, } diff --git a/platform/testdata/clients/clover.go b/platform/testdata/clients/clover.go index 52ab9b493..14fdcb8de 
100644 --- a/platform/testdata/clients/clover.go +++ b/platform/testdata/clients/clover.go @@ -156,31 +156,29 @@ var CloverTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__1__timeseries__count", int64(301)), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__1__parent_count", "aggr__1__key_0", "aggr__1__count", - "aggr__1__timeseries__key_0", "aggr__1__timeseries__count" - FROM ( - SELECT "aggr__1__parent_count", "aggr__1__key_0", "aggr__1__count", - "aggr__1__timeseries__key_0", "aggr__1__timeseries__count", - dense_rank() OVER (ORDER BY "aggr__1__count" DESC, "aggr__1__key_0" ASC) AS - "aggr__1__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__1__key_0" ORDER BY - "aggr__1__timeseries__key_0" ASC) AS "aggr__1__timeseries__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__1__parent_count", - "nobel_laureate" AS "aggr__1__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__1__key_0") AS "aggr__1__count", - toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( - "@timestamp", 'Europe/Warsaw'))*1000) / 2592000000) AS - "aggr__1__timeseries__key_0", count(*) AS "aggr__1__timeseries__count" - FROM __quesma_table_name - GROUP BY "nobel_laureate" AS "aggr__1__key_0", - toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( - "@timestamp", 'Europe/Warsaw'))*1000) / 2592000000) AS - "aggr__1__timeseries__key_0")) - WHERE "aggr__1__order_1_rank"<=11 - ORDER BY "aggr__1__order_1_rank" ASC, "aggr__1__timeseries__order_1_rank" ASC - `, + ExpectedPancakeSQL: "SELECT `aggr__1__parent_count`, `aggr__1__key_0`, `aggr__1__count`,\n" + + " `aggr__1__timeseries__key_0`, `aggr__1__timeseries__count`\n" + + "FROM (\n" + + " SELECT `aggr__1__parent_count`, `aggr__1__key_0`, `aggr__1__count`,\n" + + " `aggr__1__timeseries__key_0`, `aggr__1__timeseries__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__1__count` DESC, `aggr__1__key_0` ASC) AS\n" + + " `aggr__1__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION 
BY `aggr__1__key_0` ORDER BY\n" + + " `aggr__1__timeseries__key_0` ASC) AS `aggr__1__timeseries__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__1__parent_count`,\n" + + " `nobel_laureate` AS `aggr__1__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__1__key_0`) AS `aggr__1__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(toTimezone(\n" + + " `@timestamp`, 'Europe/Warsaw'))*1000) / 2592000000) AS\n" + + " `aggr__1__timeseries__key_0`, count(*) AS `aggr__1__timeseries__count`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY `nobel_laureate` AS `aggr__1__key_0`,\n" + + " toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(toTimezone(\n" + + " `@timestamp`, 'Europe/Warsaw'))*1000) / 2592000000) AS\n" + + " `aggr__1__timeseries__key_0`))\n" + + "WHERE `aggr__1__order_1_rank`<=11\n" + + "ORDER BY `aggr__1__order_1_rank` ASC, `aggr__1__timeseries__order_1_rank` ASC", }, { // [1] TestName: "multiple buckets_path", @@ -344,14 +342,13 @@ var CloverTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__timeseries__a2-numerator_col_0", int64(202)), }}, }, - ExpectedPancakeSQL: ` - SELECT count(*) AS "aggr__timeseries__count", - countIf(true) AS "metric__timeseries__a2-denominator_col_0", - countIf(NOT ("table.flower" __quesma_match 'clover')) AS - "metric__timeseries__a2-numerator_col_0" - FROM __quesma_table_name - WHERE ("@timestamp">=fromUnixTimestamp64Milli(1728640683723) AND "@timestamp"<= - fromUnixTimestamp64Milli(1728641583723))`, + ExpectedPancakeSQL: "SELECT count(*) AS `aggr__timeseries__count`,\n" + + " countIf(true) AS `metric__timeseries__a2-denominator_col_0`,\n" + + " countIf(NOT (`table.flower` __quesma_match 'clover')) AS\n" + + " `metric__timeseries__a2-numerator_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`@timestamp`>=fromUnixTimestamp64Milli(1728640683723) AND `@timestamp`<= \n" + + " fromUnixTimestamp64Milli(1728641583723))", }, { // [2] 
TestName: "simplest auto_date_histogram", @@ -474,11 +471,10 @@ var CloverTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__timeseries__count", int64(202)), }}, }, - ExpectedPancakeSQL: ` - SELECT count(*) AS "aggr__timeseries__count" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1728581627125) AND "timestamp"<= - fromUnixTimestamp64Milli(1728635627125))`, + ExpectedPancakeSQL: "SELECT count(*) AS `aggr__timeseries__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1728581627125) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1728635627125))", AdditionalAcceptableDifference: []string{"key_as_string"}, // timezone differences between local and github runs... There's always 2h difference between those, need to investigate. Maybe come back to .UTC() so there's no "+timezone" (e.g. +02:00)? }, { // [3] @@ -642,14 +638,13 @@ var CloverTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__timeseries__f2-numerator_col_0", int64(178)), }}, }, - ExpectedPancakeSQL: ` - SELECT count(*) AS "aggr__timeseries__count", - countIf(true) AS "metric__timeseries__f2-denominator_col_0", - countIf(NOT ("a.b_str" IS NOT NULL)) AS - "metric__timeseries__f2-numerator_col_0" - FROM __quesma_table_name - WHERE ("@timestamp">=fromUnixTimestamp64Milli(1721399904783) AND "@timestamp"<= - fromUnixTimestamp64Milli(1730475504783))`, + ExpectedPancakeSQL: "SELECT count(*) AS `aggr__timeseries__count`,\n" + + " countIf(true) AS `metric__timeseries__f2-denominator_col_0`,\n" + + " countIf(NOT (`a.b_str` IS NOT NULL)) AS\n" + + " `metric__timeseries__f2-numerator_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`@timestamp`>=fromUnixTimestamp64Milli(1721399904783) AND `@timestamp`<= \n" + + " fromUnixTimestamp64Milli(1730475504783))", }, { // [4] TestName: "todo", @@ -760,17 +755,16 @@ var CloverTests = []testdata.AggregationTestCase{ 
model.NewQueryResultCol("aggr__other-filter__3__count", int64(1)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__other-filter__count", - sum(count(*)) OVER () AS "aggr__other-filter__3__parent_count", - "field" AS "aggr__other-filter__3__key_0", - count(*) AS "aggr__other-filter__3__count" - FROM __quesma_table_name - WHERE ("a" __quesma_match '%b%' AND "c" __quesma_match '%d%') - GROUP BY "field" AS "aggr__other-filter__3__key_0" - ORDER BY "aggr__other-filter__3__count" DESC, - "aggr__other-filter__3__key_0" ASC - LIMIT 16`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__other-filter__count`,\n" + + " sum(count(*)) OVER () AS `aggr__other-filter__3__parent_count`,\n" + + " `field` AS `aggr__other-filter__3__key_0`,\n" + + " count(*) AS `aggr__other-filter__3__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`a` __quesma_match '%b%' AND `c` __quesma_match '%d%')\n" + + "GROUP BY `field` AS `aggr__other-filter__3__key_0`\n" + + "ORDER BY `aggr__other-filter__3__count` DESC,\n" + + " `aggr__other-filter__3__key_0` ASC\n" + + "LIMIT 16", }, { // [5] TestName: "todo", @@ -944,17 +938,16 @@ var CloverTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__q__time_buckets__count", int64(1)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__q__count", - toInt64(toUnixTimestamp64Milli("@timestamp") / 604800000) AS - "aggr__q__time_buckets__key_0", count(*) AS "aggr__q__time_buckets__count" - FROM __quesma_table_name - WHERE (("@timestamp">=fromUnixTimestamp64Milli(1728507729621) AND "@timestamp"<= - fromUnixTimestamp64Milli(1728507732621)) AND "__quesma_fulltext_field_name" - __quesma_match '%') - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 604800000) AS - "aggr__q__time_buckets__key_0" - ORDER BY "aggr__q__time_buckets__key_0" ASC`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__q__count`,\n" + + " toInt64(toUnixTimestamp64Milli(`@timestamp`) / 604800000) 
AS\n" + + " `aggr__q__time_buckets__key_0`, count(*) AS `aggr__q__time_buckets__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE ((`@timestamp`>=fromUnixTimestamp64Milli(1728507729621) AND `@timestamp`<= \n" + + " fromUnixTimestamp64Milli(1728507732621)) AND `__quesma_fulltext_field_name`\n" + + " __quesma_match '%')\n" + + "GROUP BY toInt64(toUnixTimestamp64Milli(`@timestamp`) / 604800000) AS\n" + + " `aggr__q__time_buckets__key_0`\n" + + "ORDER BY `aggr__q__time_buckets__key_0` ASC", }, { // [6] TestName: "Clover", @@ -1108,18 +1101,17 @@ var CloverTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__q__time_buckets__sum(count)_col_0", int64(1)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__q__count", - toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( - "@timestamp", 'Europe/Warsaw'))*1000) / 1800000) AS - "aggr__q__time_buckets__key_0", count(*) AS "aggr__q__time_buckets__count", - sumOrNull("count") AS "metric__q__time_buckets__sum(count)_col_0" - FROM __quesma_table_name - WHERE NOT ("str_field" __quesma_match 'CRASH') - GROUP BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone - ("@timestamp", 'Europe/Warsaw'))*1000) / 1800000) AS - "aggr__q__time_buckets__key_0" - ORDER BY "aggr__q__time_buckets__key_0" ASC`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__q__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(toTimezone(\n" + + " `@timestamp`, 'Europe/Warsaw'))*1000) / 1800000) AS\n" + + " `aggr__q__time_buckets__key_0`, count(*) AS `aggr__q__time_buckets__count`,\n" + + " sumOrNull(`count`) AS `metric__q__time_buckets__sum(count)_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE NOT (`str_field` __quesma_match 'CRASH')\n" + + "GROUP BY toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(toTimezone\n" + + " (`@timestamp`, 'Europe/Warsaw'))*1000) / 1800000) AS\n" + + " `aggr__q__time_buckets__key_0`\n" + 
+ "ORDER BY `aggr__q__time_buckets__key_0` ASC", }, { TestName: "Weird aggregation and filter names", @@ -1208,16 +1200,15 @@ var CloverTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__q__time__cardinality(a.b.keyword)_col_0", int64(672)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__q__count", - toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( - "@timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS "aggr__q__time__key_0", - count(*) AS "aggr__q__time__count", - uniq("a.b") AS "metric__q__time__cardinality(a.b.keyword)_col_0" - FROM __quesma_table_name - WHERE (("a.b" __quesma_match '%c%') OR "a.b" __quesma_match '%d%') - GROUP BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone - ("@timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS "aggr__q__time__key_0" - ORDER BY "aggr__q__time__key_0" ASC`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__q__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(toTimezone(\n" + + " `@timestamp`, 'Europe/Warsaw'))*1000) / 43200000) AS `aggr__q__time__key_0`,\n" + + " count(*) AS `aggr__q__time__count`,\n" + + " uniq(`a.b`) AS `metric__q__time__cardinality(a.b.keyword)_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE ((`a.b` __quesma_match '%c%') OR `a.b` __quesma_match '%d%')\n" + + "GROUP BY toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(toTimezone\n" + + " (`@timestamp`, 'Europe/Warsaw'))*1000) / 43200000) AS `aggr__q__time__key_0`\n" + + "ORDER BY `aggr__q__time__key_0` ASC", }, } diff --git a/platform/testdata/clients/kunkka.go b/platform/testdata/clients/kunkka.go index 95d0df44b..4ce6ec23b 100644 --- a/platform/testdata/clients/kunkka.go +++ b/platform/testdata/clients/kunkka.go @@ -8,7 +8,7 @@ import ( ) const TableName = model.SingleTableNamePlaceHolder -const fullTextFieldName = `"` + model.FullTextFieldNamePlaceHolder + `"` +const fullTextFieldName = "`" + 
model.FullTextFieldNamePlaceHolder + "`" var KunkkaTests = []testdata.AggregationTestCase{ { // [0] @@ -169,17 +169,16 @@ var KunkkaTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__0__2-bucket__2-metric_col_0", 1.0), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 3600000) AS - "aggr__0__key_0", count(*) AS "aggr__0__count", - sumOrNull("spent") AS "metric__0__1_col_0", - countIf(` + fullTextFieldName + ` iLIKE '%started%') AS "aggr__0__2-bucket__count", - sumOrNullIf("multiplier", ` + fullTextFieldName + ` iLIKE '%started%') AS - "metric__0__2-bucket__2-metric_col_0" - FROM ` + TableName + ` - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 3600000) AS - "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64(toUnixTimestamp64Milli(`@timestamp`) / 3600000) AS\n" + + " `aggr__0__key_0`, count(*) AS `aggr__0__count`,\n" + + " sumOrNull(`spent`) AS `metric__0__1_col_0`,\n" + + " countIf(" + fullTextFieldName + " iLIKE '%started%') AS `aggr__0__2-bucket__count`,\n" + + " sumOrNullIf(`multiplier`, " + fullTextFieldName + " iLIKE '%started%') AS\n" + + " `metric__0__2-bucket__2-metric_col_0`\n" + + "FROM `" + TableName + "`\n" + + "GROUP BY toInt64(toUnixTimestamp64Milli(`@timestamp`) / 3600000) AS\n" + + " `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__key_0` ASC", }, { // [1] TestName: "it's the same input as in previous test, but with the original output from Elastic." 
+ @@ -352,18 +351,17 @@ var KunkkaTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__0__2-bucket__2-metric_col_0", 1.0), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( - "@timestamp", 'Europe/Warsaw'))*1000) / 3600000) AS "aggr__0__key_0", - count(*) AS "aggr__0__count", - sumOrNull("spent") AS "metric__0__1_col_0", - countIf(` + fullTextFieldName + ` iLIKE '%started%') AS "aggr__0__2-bucket__count", - sumOrNullIf("multiplier", ` + fullTextFieldName + ` iLIKE '%started%') AS - "metric__0__2-bucket__2-metric_col_0" - FROM ` + TableName + ` - GROUP BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( - "@timestamp", 'Europe/Warsaw'))*1000) / 3600000) AS "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(toTimezone(\n" + + " `@timestamp`, 'Europe/Warsaw'))*1000) / 3600000) AS `aggr__0__key_0`,\n" + + " count(*) AS `aggr__0__count`,\n" + + " sumOrNull(`spent`) AS `metric__0__1_col_0`,\n" + + " countIf(" + fullTextFieldName + " iLIKE '%started%') AS `aggr__0__2-bucket__count`,\n" + + " sumOrNullIf(`multiplier`, " + fullTextFieldName + " iLIKE '%started%') AS\n" + + " `metric__0__2-bucket__2-metric_col_0`\n" + + "FROM `" + TableName + "`\n" + + "GROUP BY toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(toTimezone(\n" + + " `@timestamp`, 'Europe/Warsaw'))*1000) / 3600000) AS `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__key_0` ASC", }, { // [2] TestName: "clients/kunkka/test_1, used to be broken before aggregations merge fix", diff --git a/platform/testdata/clients/ophelia.go b/platform/testdata/clients/ophelia.go index 662cc862b..ee0fae6cc 100644 --- a/platform/testdata/clients/ophelia.go +++ b/platform/testdata/clients/ophelia.go @@ -229,43 +229,42 @@ var OpheliaTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__8__4__count", int64(17)), 
}}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "aggr__2__8__parent_count", "aggr__2__8__key_0", "aggr__2__8__count", - "aggr__2__8__4__parent_count", "aggr__2__8__4__key_0", "aggr__2__8__4__count" - FROM ( - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "aggr__2__8__parent_count", "aggr__2__8__key_0", "aggr__2__8__count", - "aggr__2__8__4__parent_count", "aggr__2__8__4__key_0", - "aggr__2__8__4__count", - dense_rank() OVER (ORDER BY "aggr__2__count" DESC, "aggr__2__key_0" ASC) AS - "aggr__2__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY - "aggr__2__8__count" DESC, "aggr__2__8__key_0" ASC) AS - "aggr__2__8__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0", "aggr__2__8__key_0" ORDER - BY "aggr__2__8__4__count" DESC, "aggr__2__8__4__key_0" ASC) AS - "aggr__2__8__4__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__2__parent_count", - "surname" AS "aggr__2__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS - "aggr__2__8__parent_count", - COALESCE("limbName", '__missing__') AS "aggr__2__8__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0", "aggr__2__8__key_0") AS - "aggr__2__8__count", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0", "aggr__2__8__key_0") AS - "aggr__2__8__4__parent_count", "organName" AS "aggr__2__8__4__key_0", - count(*) AS "aggr__2__8__4__count" - FROM __quesma_table_name - GROUP BY "surname" AS "aggr__2__key_0", - COALESCE("limbName", '__missing__') AS "aggr__2__8__key_0", - "organName" AS "aggr__2__8__4__key_0")) - WHERE (("aggr__2__order_1_rank"<=201 AND "aggr__2__8__order_1_rank"<=20) AND - "aggr__2__8__4__order_1_rank"<=2) - ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__8__order_1_rank" ASC, - "aggr__2__8__4__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__2__parent_count`, `aggr__2__key_0`, 
`aggr__2__count`,\n" + + " `aggr__2__8__parent_count`, `aggr__2__8__key_0`, `aggr__2__8__count`,\n" + + " `aggr__2__8__4__parent_count`, `aggr__2__8__4__key_0`, `aggr__2__8__4__count`\n" + + "FROM (\n" + + " SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `aggr__2__8__parent_count`, `aggr__2__8__key_0`, `aggr__2__8__count`,\n" + + " `aggr__2__8__4__parent_count`, `aggr__2__8__4__key_0`,\n" + + " `aggr__2__8__4__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__2__count` DESC, `aggr__2__key_0` ASC) AS\n" + + " `aggr__2__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0` ORDER BY\n" + + " `aggr__2__8__count` DESC, `aggr__2__8__key_0` ASC) AS\n" + + " `aggr__2__8__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__8__key_0` ORDER\n" + + " BY `aggr__2__8__4__count` DESC, `aggr__2__8__4__key_0` ASC) AS\n" + + " `aggr__2__8__4__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__2__parent_count`,\n" + + " `surname` AS `aggr__2__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS\n" + + " `aggr__2__8__parent_count`,\n" + + " COALESCE(`limbName`, '__missing__') AS `aggr__2__8__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__8__key_0`) AS\n" + + " `aggr__2__8__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__8__key_0`) AS\n" + + " `aggr__2__8__4__parent_count`, `organName` AS `aggr__2__8__4__key_0`,\n" + + " count(*) AS `aggr__2__8__4__count`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY `surname` AS `aggr__2__key_0`,\n" + + " COALESCE(`limbName`, '__missing__') AS `aggr__2__8__key_0`,\n" + + " `organName` AS `aggr__2__8__4__key_0`))\n" + + "WHERE ((`aggr__2__order_1_rank`<=201 AND `aggr__2__8__order_1_rank`<=20) AND\n" + + " `aggr__2__8__4__order_1_rank`<=2)\n" + + "ORDER BY `aggr__2__order_1_rank` 
ASC, `aggr__2__8__order_1_rank` ASC,\n" + + " `aggr__2__8__4__order_1_rank` ASC", }, { // [1] TestName: "Ophelia Test 2: triple terms + other aggregations + default order", @@ -568,52 +567,51 @@ var OpheliaTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__2__8__4__5_col_0", 205408.48849999998), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "metric__2__1_col_0", "aggr__2__8__parent_count", "aggr__2__8__key_0", - "aggr__2__8__count", "metric__2__8__1_col_0", "aggr__2__8__4__parent_count", - "aggr__2__8__4__key_0", "aggr__2__8__4__count", "metric__2__8__4__1_col_0", - "metric__2__8__4__5_col_0" - FROM ( - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "metric__2__1_col_0", "aggr__2__8__parent_count", "aggr__2__8__key_0", - "aggr__2__8__count", "metric__2__8__1_col_0", "aggr__2__8__4__parent_count", - "aggr__2__8__4__key_0", "aggr__2__8__4__count", "metric__2__8__4__1_col_0", - "metric__2__8__4__5_col_0", - dense_rank() OVER (ORDER BY "aggr__2__count" DESC, "aggr__2__key_0" ASC) AS - "aggr__2__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY - "aggr__2__8__count" DESC, "aggr__2__8__key_0" ASC) AS - "aggr__2__8__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0", "aggr__2__8__key_0" ORDER - BY "aggr__2__8__4__count" DESC, "aggr__2__8__4__key_0" ASC) AS - "aggr__2__8__4__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__2__parent_count", - "surname" AS "aggr__2__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - sumOrNull(sumOrNull("total")) OVER (PARTITION BY "aggr__2__key_0") AS - "metric__2__1_col_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS - "aggr__2__8__parent_count", - COALESCE("limbName", '__missing__') AS "aggr__2__8__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0", "aggr__2__8__key_0") AS - "aggr__2__8__count", - sumOrNull(sumOrNull("total")) OVER 
(PARTITION BY "aggr__2__key_0", - "aggr__2__8__key_0") AS "metric__2__8__1_col_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0", "aggr__2__8__key_0") AS - "aggr__2__8__4__parent_count", "organName" AS "aggr__2__8__4__key_0", - count(*) AS "aggr__2__8__4__count", - sumOrNull("total") AS "metric__2__8__4__1_col_0", - sumOrNull("some") AS "metric__2__8__4__5_col_0" - FROM __quesma_table_name - GROUP BY "surname" AS "aggr__2__key_0", - COALESCE("limbName", '__missing__') AS "aggr__2__8__key_0", - "organName" AS "aggr__2__8__4__key_0")) - WHERE (("aggr__2__order_1_rank"<=201 AND "aggr__2__8__order_1_rank"<=20) AND - "aggr__2__8__4__order_1_rank"<=2) - ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__8__order_1_rank" ASC, - "aggr__2__8__4__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `metric__2__1_col_0`, `aggr__2__8__parent_count`, `aggr__2__8__key_0`,\n" + + " `aggr__2__8__count`, `metric__2__8__1_col_0`, `aggr__2__8__4__parent_count`,\n" + + " `aggr__2__8__4__key_0`, `aggr__2__8__4__count`, `metric__2__8__4__1_col_0`,\n" + + " `metric__2__8__4__5_col_0`\n" + + "FROM (\n" + + " SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `metric__2__1_col_0`, `aggr__2__8__parent_count`, `aggr__2__8__key_0`,\n" + + " `aggr__2__8__count`, `metric__2__8__1_col_0`, `aggr__2__8__4__parent_count`,\n" + + " `aggr__2__8__4__key_0`, `aggr__2__8__4__count`, `metric__2__8__4__1_col_0`,\n" + + " `metric__2__8__4__5_col_0`,\n" + + " dense_rank() OVER (ORDER BY `aggr__2__count` DESC, `aggr__2__key_0` ASC) AS\n" + + " `aggr__2__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0` ORDER BY\n" + + " `aggr__2__8__count` DESC, `aggr__2__8__key_0` ASC) AS\n" + + " `aggr__2__8__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__8__key_0` ORDER\n" + + " BY `aggr__2__8__4__count` DESC, `aggr__2__8__4__key_0` ASC) AS\n" + + " 
`aggr__2__8__4__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__2__parent_count`,\n" + + " `surname` AS `aggr__2__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__count`,\n" + + " sumOrNull(sumOrNull(`total`)) OVER (PARTITION BY `aggr__2__key_0`) AS\n" + + " `metric__2__1_col_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS\n" + + " `aggr__2__8__parent_count`,\n" + + " COALESCE(`limbName`, '__missing__') AS `aggr__2__8__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__8__key_0`) AS\n" + + " `aggr__2__8__count`,\n" + + " sumOrNull(sumOrNull(`total`)) OVER (PARTITION BY `aggr__2__key_0`,\n" + + " `aggr__2__8__key_0`) AS `metric__2__8__1_col_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__8__key_0`) AS\n" + + " `aggr__2__8__4__parent_count`, `organName` AS `aggr__2__8__4__key_0`,\n" + + " count(*) AS `aggr__2__8__4__count`,\n" + + " sumOrNull(`total`) AS `metric__2__8__4__1_col_0`,\n" + + " sumOrNull(`some`) AS `metric__2__8__4__5_col_0`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY `surname` AS `aggr__2__key_0`,\n" + + " COALESCE(`limbName`, '__missing__') AS `aggr__2__8__key_0`,\n" + + " `organName` AS `aggr__2__8__4__key_0`))\n" + + "WHERE ((`aggr__2__order_1_rank`<=201 AND `aggr__2__8__order_1_rank`<=20) AND\n" + + " `aggr__2__8__4__order_1_rank`<=2)\n" + + "ORDER BY `aggr__2__order_1_rank` ASC, `aggr__2__8__order_1_rank` ASC,\n" + + " `aggr__2__8__4__order_1_rank` ASC", }, { // [2] TestName: "Ophelia Test 3: 5x terms + a lot of other aggregations", @@ -1003,91 +1001,7 @@ var OpheliaTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__2__7__8__4__3__6_col_0", -0.6), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "metric__2__1_col_0", "aggr__2__7__parent_count", "aggr__2__7__key_0", - "aggr__2__7__count", "metric__2__7__1_col_0", 
"aggr__2__7__8__parent_count", - "aggr__2__7__8__key_0", "aggr__2__7__8__count", "metric__2__7__8__1_col_0", - "aggr__2__7__8__4__parent_count", "aggr__2__7__8__4__key_0", - "aggr__2__7__8__4__count", "metric__2__7__8__4__1_col_0", - "aggr__2__7__8__4__3__parent_count", "aggr__2__7__8__4__3__key_0", - "aggr__2__7__8__4__3__count", "metric__2__7__8__4__3__1_col_0", - "metric__2__7__8__4__3__5_col_0", "metric__2__7__8__4__3__6_col_0" - FROM ( - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "metric__2__1_col_0", "aggr__2__7__parent_count", "aggr__2__7__key_0", - "aggr__2__7__count", "metric__2__7__1_col_0", "aggr__2__7__8__parent_count", - "aggr__2__7__8__key_0", "aggr__2__7__8__count", "metric__2__7__8__1_col_0", - "aggr__2__7__8__4__parent_count", "aggr__2__7__8__4__key_0", - "aggr__2__7__8__4__count", "metric__2__7__8__4__1_col_0", - "aggr__2__7__8__4__3__parent_count", "aggr__2__7__8__4__3__key_0", - "aggr__2__7__8__4__3__count", "metric__2__7__8__4__3__1_col_0", - "metric__2__7__8__4__3__5_col_0", "metric__2__7__8__4__3__6_col_0", - dense_rank() OVER (ORDER BY "metric__2__1_col_0" DESC, "aggr__2__key_0" ASC) - AS "aggr__2__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY - "metric__2__7__1_col_0" DESC, "aggr__2__7__key_0" ASC) AS - "aggr__2__7__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0", "aggr__2__7__key_0" ORDER - BY "metric__2__7__8__1_col_0" DESC, "aggr__2__7__8__key_0" ASC) AS - "aggr__2__7__8__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0", "aggr__2__7__key_0", - "aggr__2__7__8__key_0" ORDER BY "metric__2__7__8__4__1_col_0" DESC, - "aggr__2__7__8__4__key_0" ASC) AS "aggr__2__7__8__4__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0", "aggr__2__7__key_0", - "aggr__2__7__8__key_0", "aggr__2__7__8__4__key_0" ORDER BY - "metric__2__7__8__4__3__1_col_0" DESC, "aggr__2__7__8__4__3__key_0" ASC) AS - "aggr__2__7__8__4__3__order_1_rank" - FROM ( - SELECT 
sum(count(*)) OVER () AS "aggr__2__parent_count", - "surname" AS "aggr__2__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - sumOrNull(sumOrNull("total")) OVER (PARTITION BY "aggr__2__key_0") AS - "metric__2__1_col_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS - "aggr__2__7__parent_count", - COALESCE("limbName", '__missing__') AS "aggr__2__7__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0", "aggr__2__7__key_0") AS - "aggr__2__7__count", - sumOrNull(sumOrNull("total")) OVER (PARTITION BY "aggr__2__key_0", - "aggr__2__7__key_0") AS "metric__2__7__1_col_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0", "aggr__2__7__key_0") AS - "aggr__2__7__8__parent_count", - COALESCE("organName", '__missing__') AS "aggr__2__7__8__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0", "aggr__2__7__key_0", - "aggr__2__7__8__key_0") AS "aggr__2__7__8__count", - sumOrNull(sumOrNull("total")) OVER (PARTITION BY "aggr__2__key_0", - "aggr__2__7__key_0", "aggr__2__7__8__key_0") AS "metric__2__7__8__1_col_0" - , - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0", "aggr__2__7__key_0", - "aggr__2__7__8__key_0") AS "aggr__2__7__8__4__parent_count", - "doctorName" AS "aggr__2__7__8__4__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0", "aggr__2__7__key_0", - "aggr__2__7__8__key_0", "aggr__2__7__8__4__key_0") AS - "aggr__2__7__8__4__count", - sumOrNull(sumOrNull("total")) OVER (PARTITION BY "aggr__2__key_0", - "aggr__2__7__key_0", "aggr__2__7__8__key_0", "aggr__2__7__8__4__key_0") AS - "metric__2__7__8__4__1_col_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0", "aggr__2__7__key_0", - "aggr__2__7__8__key_0", "aggr__2__7__8__4__key_0") AS - "aggr__2__7__8__4__3__parent_count", - "height" AS "aggr__2__7__8__4__3__key_0", - count(*) AS "aggr__2__7__8__4__3__count", - sumOrNull("total") AS "metric__2__7__8__4__3__1_col_0", - sumOrNull("some") AS "metric__2__7__8__4__3__5_col_0", - sumOrNull("cost") AS 
"metric__2__7__8__4__3__6_col_0" - FROM __quesma_table_name - GROUP BY "surname" AS "aggr__2__key_0", - COALESCE("limbName", '__missing__') AS "aggr__2__7__key_0", - COALESCE("organName", '__missing__') AS "aggr__2__7__8__key_0", - "doctorName" AS "aggr__2__7__8__4__key_0", - "height" AS "aggr__2__7__8__4__3__key_0")) - WHERE (((("aggr__2__order_1_rank"<=101 AND "aggr__2__7__order_1_rank"<=10) AND - "aggr__2__7__8__order_1_rank"<=10) AND "aggr__2__7__8__4__order_1_rank"<=7) - AND "aggr__2__7__8__4__3__order_1_rank"<=2) - ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__7__order_1_rank" ASC, - "aggr__2__7__8__order_1_rank" ASC, "aggr__2__7__8__4__order_1_rank" ASC, - "aggr__2__7__8__4__3__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n `metric__2__1_col_0`, `aggr__2__7__parent_count`, `aggr__2__7__key_0`,\n `aggr__2__7__count`, `metric__2__7__1_col_0`, `aggr__2__7__8__parent_count`,\n `aggr__2__7__8__key_0`, `aggr__2__7__8__count`, `metric__2__7__8__1_col_0`,\n `aggr__2__7__8__4__parent_count`, `aggr__2__7__8__4__key_0`,\n `aggr__2__7__8__4__count`, `metric__2__7__8__4__1_col_0`,\n `aggr__2__7__8__4__3__parent_count`, `aggr__2__7__8__4__3__key_0`,\n `aggr__2__7__8__4__3__count`, `metric__2__7__8__4__3__1_col_0`,\n `metric__2__7__8__4__3__5_col_0`, `metric__2__7__8__4__3__6_col_0`\n FROM (\n SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n `metric__2__1_col_0`, `aggr__2__7__parent_count`, `aggr__2__7__key_0`,\n `aggr__2__7__count`, `metric__2__7__1_col_0`, `aggr__2__7__8__parent_count`,\n `aggr__2__7__8__key_0`, `aggr__2__7__8__count`, `metric__2__7__8__1_col_0`,\n `aggr__2__7__8__4__parent_count`, `aggr__2__7__8__4__key_0`,\n `aggr__2__7__8__4__count`, `metric__2__7__8__4__1_col_0`,\n `aggr__2__7__8__4__3__parent_count`, `aggr__2__7__8__4__3__key_0`,\n `aggr__2__7__8__4__3__count`, `metric__2__7__8__4__3__1_col_0`,\n `metric__2__7__8__4__3__5_col_0`, 
`metric__2__7__8__4__3__6_col_0`,\n dense_rank() OVER (ORDER BY `metric__2__1_col_0` DESC, `aggr__2__key_0` ASC)\n AS `aggr__2__order_1_rank`,\n dense_rank() OVER (PARTITION BY `aggr__2__key_0` ORDER BY `\n metric__2__7__1_col_0` DESC, `aggr__2__7__key_0` ASC) AS `\n aggr__2__7__order_1_rank`,\n dense_rank() OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__7__key_0` ORDER\n BY `metric__2__7__8__1_col_0` DESC, `aggr__2__7__8__key_0` ASC) AS `\n aggr__2__7__8__order_1_rank`,\n dense_rank() OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__7__key_0`, `\n aggr__2__7__8__key_0` ORDER BY `metric__2__7__8__4__1_col_0` DESC, `\n aggr__2__7__8__4__key_0` ASC) AS `aggr__2__7__8__4__order_1_rank`,\n dense_rank() OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__7__key_0`, `\n aggr__2__7__8__key_0`, `aggr__2__7__8__4__key_0` ORDER BY `\n metric__2__7__8__4__3__1_col_0` DESC, `aggr__2__7__8__4__3__key_0` ASC) AS `\n aggr__2__7__8__4__3__order_1_rank`\n FROM (\n SELECT sum(count(*)) OVER () AS `aggr__2__parent_count`,\n `surname` AS `aggr__2__key_0`,\n sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__count`,\n sumOrNull(sumOrNull(`total`)) OVER (PARTITION BY `aggr__2__key_0`) AS `\n metric__2__1_col_0`,\n sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `\n aggr__2__7__parent_count`,\n COALESCE(`limbName`, '__missing__') AS `aggr__2__7__key_0`,\n sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__7__key_0`) AS\n `aggr__2__7__count`,\n sumOrNull(sumOrNull(`total`)) OVER (PARTITION BY `aggr__2__key_0`, `\n aggr__2__7__key_0`) AS `metric__2__7__1_col_0`,\n sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__7__key_0`) AS\n `aggr__2__7__8__parent_count`,\n COALESCE(`organName`, '__missing__') AS `aggr__2__7__8__key_0`,\n sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__7__key_0`, `\n aggr__2__7__8__key_0`) AS `aggr__2__7__8__count`,\n sumOrNull(sumOrNull(`total`)) OVER (PARTITION BY `aggr__2__key_0`, `\n aggr__2__7__key_0`, 
`aggr__2__7__8__key_0`) AS `metric__2__7__8__1_col_0`,\n sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__7__key_0`, `\n aggr__2__7__8__key_0`) AS `aggr__2__7__8__4__parent_count`,\n `doctorName` AS `aggr__2__7__8__4__key_0`,\n sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__7__key_0`, `\n aggr__2__7__8__key_0`, `aggr__2__7__8__4__key_0`) AS `\n aggr__2__7__8__4__count`,\n sumOrNull(sumOrNull(`total`)) OVER (PARTITION BY `aggr__2__key_0`, `\n aggr__2__7__key_0`, `aggr__2__7__8__key_0`, `aggr__2__7__8__4__key_0`) AS\n `metric__2__7__8__4__1_col_0`,\n sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__7__key_0`, `\n aggr__2__7__8__key_0`, `aggr__2__7__8__4__key_0`) AS `\n aggr__2__7__8__4__3__parent_count`,\n `height` AS `aggr__2__7__8__4__3__key_0`,\n count(*) AS `aggr__2__7__8__4__3__count`,\n sumOrNull(`total`) AS `metric__2__7__8__4__3__1_col_0`,\n sumOrNull(`some`) AS `metric__2__7__8__4__3__5_col_0`,\n sumOrNull(`cost`) AS `metric__2__7__8__4__3__6_col_0`\n FROM `__quesma_table_name`\n GROUP BY `surname` AS `aggr__2__key_0`,\n COALESCE(`limbName`, '__missing__') AS `aggr__2__7__key_0`,\n COALESCE(`organName`, '__missing__') AS `aggr__2__7__8__key_0`,\n `doctorName` AS `aggr__2__7__8__4__key_0`,\n `height` AS `aggr__2__7__8__4__3__key_0`))\n WHERE ((((`aggr__2__order_1_rank`<=101 AND `aggr__2__7__order_1_rank`<=10) AND `\n aggr__2__7__8__order_1_rank`<=10) AND `aggr__2__7__8__4__order_1_rank`<=7) AND\n `aggr__2__7__8__4__3__order_1_rank`<=2)\n ORDER BY `aggr__2__order_1_rank` ASC, `aggr__2__7__order_1_rank` ASC,\n `aggr__2__7__8__order_1_rank` ASC, `aggr__2__7__8__4__order_1_rank` ASC,\n `aggr__2__7__8__4__3__order_1_rank` ASC", }, { // [3] TestName: "Ophelia Test 4: triple terms + order by another aggregations", @@ -1355,47 +1269,46 @@ var OpheliaTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__8__4__count", int64(17)), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__2__parent_count", "aggr__2__key_0", 
"aggr__2__count", - "metric__2__1_col_0", "aggr__2__8__parent_count", "aggr__2__8__key_0", - "aggr__2__8__count", "metric__2__8__1_col_0", "aggr__2__8__4__parent_count", - "aggr__2__8__4__key_0", "aggr__2__8__4__count" - FROM ( - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "metric__2__1_col_0", "aggr__2__8__parent_count", "aggr__2__8__key_0", - "aggr__2__8__count", "metric__2__8__1_col_0", "aggr__2__8__4__parent_count", - "aggr__2__8__4__key_0", "aggr__2__8__4__count", - dense_rank() OVER (ORDER BY "metric__2__1_col_0" DESC, "aggr__2__key_0" ASC) - AS "aggr__2__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY - "metric__2__8__1_col_0" ASC, "aggr__2__8__key_0" ASC) AS - "aggr__2__8__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0", "aggr__2__8__key_0" ORDER - BY "aggr__2__8__4__key_0" DESC) AS "aggr__2__8__4__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__2__parent_count", - "surname" AS "aggr__2__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - avgOrNullMerge(avgOrNullState("total")) OVER (PARTITION BY - "aggr__2__key_0") AS "metric__2__1_col_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS - "aggr__2__8__parent_count", - COALESCE("limbName", '__missing__') AS "aggr__2__8__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0", "aggr__2__8__key_0") AS - "aggr__2__8__count", - sumOrNull(sumOrNull("total")) OVER (PARTITION BY "aggr__2__key_0", - "aggr__2__8__key_0") AS "metric__2__8__1_col_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0", "aggr__2__8__key_0") AS - "aggr__2__8__4__parent_count", "organName" AS "aggr__2__8__4__key_0", - count(*) AS "aggr__2__8__4__count" - FROM __quesma_table_name - GROUP BY "surname" AS "aggr__2__key_0", - COALESCE("limbName", '__missing__') AS "aggr__2__8__key_0", - "organName" AS "aggr__2__8__4__key_0")) - WHERE (("aggr__2__order_1_rank"<=201 AND "aggr__2__8__order_1_rank"<=20) AND - 
"aggr__2__8__4__order_1_rank"<=2) - ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__8__order_1_rank" ASC, - "aggr__2__8__4__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `metric__2__1_col_0`, `aggr__2__8__parent_count`, `aggr__2__8__key_0`,\n" + + " `aggr__2__8__count`, `metric__2__8__1_col_0`, `aggr__2__8__4__parent_count`,\n" + + " `aggr__2__8__4__key_0`, `aggr__2__8__4__count`\n" + + "FROM (\n" + + " SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `metric__2__1_col_0`, `aggr__2__8__parent_count`, `aggr__2__8__key_0`,\n" + + " `aggr__2__8__count`, `metric__2__8__1_col_0`, `aggr__2__8__4__parent_count`,\n" + + " `aggr__2__8__4__key_0`, `aggr__2__8__4__count`,\n" + + " dense_rank() OVER (ORDER BY `metric__2__1_col_0` DESC, `aggr__2__key_0` ASC)\n" + + " AS `aggr__2__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0` ORDER BY\n" + + " `metric__2__8__1_col_0` ASC, `aggr__2__8__key_0` ASC) AS\n" + + " `aggr__2__8__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__8__key_0` ORDER\n" + + " BY `aggr__2__8__4__key_0` DESC) AS `aggr__2__8__4__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__2__parent_count`,\n" + + " `surname` AS `aggr__2__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__count`,\n" + + " avgOrNullMerge(avgOrNullState(`total`)) OVER (PARTITION BY\n" + + " `aggr__2__key_0`) AS `metric__2__1_col_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS\n" + + " `aggr__2__8__parent_count`,\n" + + " COALESCE(`limbName`, '__missing__') AS `aggr__2__8__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__8__key_0`) AS\n" + + " `aggr__2__8__count`,\n" + + " sumOrNull(sumOrNull(`total`)) OVER (PARTITION BY `aggr__2__key_0`,\n" + + " `aggr__2__8__key_0`) AS `metric__2__8__1_col_0`,\n" + + " sum(count(*)) OVER 
(PARTITION BY `aggr__2__key_0`, `aggr__2__8__key_0`) AS\n" + + " `aggr__2__8__4__parent_count`, `organName` AS `aggr__2__8__4__key_0`,\n" + + " count(*) AS `aggr__2__8__4__count`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY `surname` AS `aggr__2__key_0`,\n" + + " COALESCE(`limbName`, '__missing__') AS `aggr__2__8__key_0`,\n" + + " `organName` AS `aggr__2__8__4__key_0`))\n" + + "WHERE ((`aggr__2__order_1_rank`<=201 AND `aggr__2__8__order_1_rank`<=20) AND\n" + + " `aggr__2__8__4__order_1_rank`<=2)\n" + + "ORDER BY `aggr__2__order_1_rank` ASC, `aggr__2__8__order_1_rank` ASC,\n" + + " `aggr__2__8__4__order_1_rank` ASC", }, { // [4] TestName: "Ophelia Test 5: 4x terms + order by another aggregations", @@ -1704,58 +1617,57 @@ var OpheliaTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__8__4__5__count", int64(21)), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "aggr__2__8__parent_count", "aggr__2__8__key_0", "aggr__2__8__count", - "metric__2__8__1_col_0", "aggr__2__8__4__parent_count", - "aggr__2__8__4__key_0", "aggr__2__8__4__count", - "aggr__2__8__4__5__parent_count", "aggr__2__8__4__5__key_0", - "aggr__2__8__4__5__count" - FROM ( - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "aggr__2__8__parent_count", "aggr__2__8__key_0", "aggr__2__8__count", - "metric__2__8__1_col_0", "aggr__2__8__4__parent_count", - "aggr__2__8__4__key_0", "aggr__2__8__4__count", - "aggr__2__8__4__5__parent_count", "aggr__2__8__4__5__key_0", - "aggr__2__8__4__5__count", - dense_rank() OVER (ORDER BY "aggr__2__key_0" DESC) AS - "aggr__2__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY - "metric__2__8__1_col_0" ASC, "aggr__2__8__key_0" ASC) AS - "aggr__2__8__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0", "aggr__2__8__key_0" ORDER - BY "aggr__2__8__4__key_0" DESC) AS "aggr__2__8__4__order_1_rank", - dense_rank() OVER (PARTITION BY 
"aggr__2__key_0", "aggr__2__8__key_0", - "aggr__2__8__4__key_0" ORDER BY "aggr__2__8__4__5__count" DESC, - "aggr__2__8__4__5__key_0" ASC) AS "aggr__2__8__4__5__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__2__parent_count", - "surname" AS "aggr__2__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS - "aggr__2__8__parent_count", - COALESCE("limbName", '__missing__') AS "aggr__2__8__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0", "aggr__2__8__key_0") AS - "aggr__2__8__count", - sumOrNull(sumOrNull("total")) OVER (PARTITION BY "aggr__2__key_0", - "aggr__2__8__key_0") AS "metric__2__8__1_col_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0", "aggr__2__8__key_0") AS - "aggr__2__8__4__parent_count", "organName" AS "aggr__2__8__4__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0", "aggr__2__8__key_0", - "aggr__2__8__4__key_0") AS "aggr__2__8__4__count", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0", "aggr__2__8__key_0", - "aggr__2__8__4__key_0") AS "aggr__2__8__4__5__parent_count", - "organName" AS "aggr__2__8__4__5__key_0", - count(*) AS "aggr__2__8__4__5__count" - FROM __quesma_table_name - GROUP BY "surname" AS "aggr__2__key_0", - COALESCE("limbName", '__missing__') AS "aggr__2__8__key_0", - "organName" AS "aggr__2__8__4__key_0", - "organName" AS "aggr__2__8__4__5__key_0")) - WHERE ((("aggr__2__order_1_rank"<=201 AND "aggr__2__8__order_1_rank"<=20) AND - "aggr__2__8__4__order_1_rank"<=2) AND "aggr__2__8__4__5__order_1_rank"<=3) - ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__8__order_1_rank" ASC, - "aggr__2__8__4__order_1_rank" ASC, "aggr__2__8__4__5__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `aggr__2__8__parent_count`, `aggr__2__8__key_0`, `aggr__2__8__count`,\n" + + " `metric__2__8__1_col_0`, `aggr__2__8__4__parent_count`,\n" + + " 
`aggr__2__8__4__key_0`, `aggr__2__8__4__count`,\n" + + " `aggr__2__8__4__5__parent_count`, `aggr__2__8__4__5__key_0`,\n" + + " `aggr__2__8__4__5__count`\n" + + "FROM (\n" + + " SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `aggr__2__8__parent_count`, `aggr__2__8__key_0`, `aggr__2__8__count`,\n" + + " `metric__2__8__1_col_0`, `aggr__2__8__4__parent_count`,\n" + + " `aggr__2__8__4__key_0`, `aggr__2__8__4__count`,\n" + + " `aggr__2__8__4__5__parent_count`, `aggr__2__8__4__5__key_0`,\n" + + " `aggr__2__8__4__5__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__2__key_0` DESC) AS\n" + + " `aggr__2__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0` ORDER BY\n" + + " `metric__2__8__1_col_0` ASC, `aggr__2__8__key_0` ASC) AS\n" + + " `aggr__2__8__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__8__key_0` ORDER\n" + + " BY `aggr__2__8__4__key_0` DESC) AS `aggr__2__8__4__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__8__key_0`,\n" + + " `aggr__2__8__4__key_0` ORDER BY `aggr__2__8__4__5__count` DESC,\n" + + " `aggr__2__8__4__5__key_0` ASC) AS `aggr__2__8__4__5__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__2__parent_count`,\n" + + " `surname` AS `aggr__2__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS\n" + + " `aggr__2__8__parent_count`,\n" + + " COALESCE(`limbName`, '__missing__') AS `aggr__2__8__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__8__key_0`) AS\n" + + " `aggr__2__8__count`,\n" + + " sumOrNull(sumOrNull(`total`)) OVER (PARTITION BY `aggr__2__key_0`,\n" + + " `aggr__2__8__key_0`) AS `metric__2__8__1_col_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__8__key_0`) AS\n" + + " `aggr__2__8__4__parent_count`, `organName` AS `aggr__2__8__4__key_0`,\n" + + " 
sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__8__key_0`,\n" + + " `aggr__2__8__4__key_0`) AS `aggr__2__8__4__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__8__key_0`,\n" + + " `aggr__2__8__4__key_0`) AS `aggr__2__8__4__5__parent_count`,\n" + + " `organName` AS `aggr__2__8__4__5__key_0`,\n" + + " count(*) AS `aggr__2__8__4__5__count`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY `surname` AS `aggr__2__key_0`,\n" + + " COALESCE(`limbName`, '__missing__') AS `aggr__2__8__key_0`,\n" + + " `organName` AS `aggr__2__8__4__key_0`,\n" + + " `organName` AS `aggr__2__8__4__5__key_0`))\n" + + "WHERE (((`aggr__2__order_1_rank`<=201 AND `aggr__2__8__order_1_rank`<=20) AND\n" + + " `aggr__2__8__4__order_1_rank`<=2) AND `aggr__2__8__4__5__order_1_rank`<=3)\n" + + "ORDER BY `aggr__2__order_1_rank` ASC, `aggr__2__8__order_1_rank` ASC,\n" + + " `aggr__2__8__4__order_1_rank` ASC, `aggr__2__8__4__5__order_1_rank` ASC", }, { // [5] TestName: "Ophelia Test 6: triple terms + other aggregations + order by another aggregations", @@ -2067,52 +1979,51 @@ var OpheliaTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__2__8__4__5_col_0", 205408.48849999998), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "metric__2__1_col_0", "aggr__2__8__parent_count", "aggr__2__8__key_0", - "aggr__2__8__count", "metric__2__8__1_col_0", "aggr__2__8__4__parent_count", - "aggr__2__8__4__key_0", "aggr__2__8__4__count", "metric__2__8__4__1_col_0", - "metric__2__8__4__5_col_0" - FROM ( - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "metric__2__1_col_0", "aggr__2__8__parent_count", "aggr__2__8__key_0", - "aggr__2__8__count", "metric__2__8__1_col_0", "aggr__2__8__4__parent_count", - "aggr__2__8__4__key_0", "aggr__2__8__4__count", "metric__2__8__4__1_col_0", - "metric__2__8__4__5_col_0", - dense_rank() OVER (ORDER BY "metric__2__1_col_0" DESC, "aggr__2__key_0" ASC) 
- AS "aggr__2__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY - "metric__2__8__1_col_0" DESC, "aggr__2__8__key_0" ASC) AS - "aggr__2__8__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0", "aggr__2__8__key_0" ORDER - BY "metric__2__8__4__1_col_0" DESC, "aggr__2__8__4__key_0" ASC) AS - "aggr__2__8__4__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__2__parent_count", - "surname" AS "aggr__2__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - sumOrNull(sumOrNull("total")) OVER (PARTITION BY "aggr__2__key_0") AS - "metric__2__1_col_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS - "aggr__2__8__parent_count", - COALESCE("limbName", '__missing__') AS "aggr__2__8__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0", "aggr__2__8__key_0") AS - "aggr__2__8__count", - sumOrNull(sumOrNull("total")) OVER (PARTITION BY "aggr__2__key_0", - "aggr__2__8__key_0") AS "metric__2__8__1_col_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0", "aggr__2__8__key_0") AS - "aggr__2__8__4__parent_count", "organName" AS "aggr__2__8__4__key_0", - count(*) AS "aggr__2__8__4__count", - sumOrNull("total") AS "metric__2__8__4__1_col_0", - sumOrNull("some") AS "metric__2__8__4__5_col_0" - FROM __quesma_table_name - GROUP BY "surname" AS "aggr__2__key_0", - COALESCE("limbName", '__missing__') AS "aggr__2__8__key_0", - "organName" AS "aggr__2__8__4__key_0")) - WHERE (("aggr__2__order_1_rank"<=201 AND "aggr__2__8__order_1_rank"<=20) AND - "aggr__2__8__4__order_1_rank"<=2) - ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__8__order_1_rank" ASC, - "aggr__2__8__4__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `metric__2__1_col_0`, `aggr__2__8__parent_count`, `aggr__2__8__key_0`,\n" + + " `aggr__2__8__count`, `metric__2__8__1_col_0`, `aggr__2__8__4__parent_count`,\n" + + " `aggr__2__8__4__key_0`, `aggr__2__8__4__count`, 
`metric__2__8__4__1_col_0`,\n" + + " `metric__2__8__4__5_col_0`\n" + + "FROM (\n" + + " SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `metric__2__1_col_0`, `aggr__2__8__parent_count`, `aggr__2__8__key_0`,\n" + + " `aggr__2__8__count`, `metric__2__8__1_col_0`, `aggr__2__8__4__parent_count`,\n" + + " `aggr__2__8__4__key_0`, `aggr__2__8__4__count`, `metric__2__8__4__1_col_0`,\n" + + " `metric__2__8__4__5_col_0`,\n" + + " dense_rank() OVER (ORDER BY `metric__2__1_col_0` DESC, `aggr__2__key_0` ASC) AS\n" + + " `aggr__2__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0` ORDER BY\n" + + " `metric__2__8__1_col_0` DESC, `aggr__2__8__key_0` ASC) AS\n" + + " `aggr__2__8__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__8__key_0` ORDER\n" + + " BY `metric__2__8__4__1_col_0` DESC, `aggr__2__8__4__key_0` ASC) AS\n" + + " `aggr__2__8__4__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__2__parent_count`,\n" + + " `surname` AS `aggr__2__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__count`,\n" + + " sumOrNull(sumOrNull(`total`)) OVER (PARTITION BY `aggr__2__key_0`) AS\n" + + " `metric__2__1_col_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS\n" + + " `aggr__2__8__parent_count`,\n" + + " COALESCE(`limbName`, '__missing__') AS `aggr__2__8__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__8__key_0`) AS\n" + + " `aggr__2__8__count`,\n" + + " sumOrNull(sumOrNull(`total`)) OVER (PARTITION BY `aggr__2__key_0`,\n" + + " `aggr__2__8__key_0`) AS `metric__2__8__1_col_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__8__key_0`) AS\n" + + " `aggr__2__8__4__parent_count`, `organName` AS `aggr__2__8__4__key_0`,\n" + + " count(*) AS `aggr__2__8__4__count`,\n" + + " sumOrNull(`total`) AS `metric__2__8__4__1_col_0`,\n" + + " sumOrNull(`some`) AS `metric__2__8__4__5_col_0`\n" + + 
" FROM `__quesma_table_name`\n" + + " GROUP BY `surname` AS `aggr__2__key_0`,\n" + + " COALESCE(`limbName`, '__missing__') AS `aggr__2__8__key_0`,\n" + + " `organName` AS `aggr__2__8__4__key_0`))\n" + + "WHERE ((`aggr__2__order_1_rank`<=201 AND `aggr__2__8__order_1_rank`<=20) AND\n" + + " `aggr__2__8__4__order_1_rank`<=2)\n" + + "ORDER BY `aggr__2__order_1_rank` ASC, `aggr__2__8__order_1_rank` ASC,\n" + + " `aggr__2__8__4__order_1_rank` ASC", }, { // [6] TestName: "Ophelia Test 7: 5x terms + a lot of other aggregations + different order bys", @@ -2502,89 +2413,6 @@ var OpheliaTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__2__7__8__4__3__6_col_0", -0.6), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "metric__2__1_col_0", "aggr__2__7__parent_count", "aggr__2__7__key_0", - "aggr__2__7__count", "metric__2__7__1_col_0", "aggr__2__7__8__parent_count", - "aggr__2__7__8__key_0", "aggr__2__7__8__count", "metric__2__7__8__1_col_0", - "aggr__2__7__8__4__parent_count", "aggr__2__7__8__4__key_0", - "aggr__2__7__8__4__count", "metric__2__7__8__4__1_col_0", - "aggr__2__7__8__4__3__parent_count", "aggr__2__7__8__4__3__key_0", - "aggr__2__7__8__4__3__count", "metric__2__7__8__4__3__1_col_0", - "metric__2__7__8__4__3__5_col_0", "metric__2__7__8__4__3__6_col_0" - FROM ( - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "metric__2__1_col_0", "aggr__2__7__parent_count", "aggr__2__7__key_0", - "aggr__2__7__count", "metric__2__7__1_col_0", "aggr__2__7__8__parent_count", - "aggr__2__7__8__key_0", "aggr__2__7__8__count", "metric__2__7__8__1_col_0", - "aggr__2__7__8__4__parent_count", "aggr__2__7__8__4__key_0", - "aggr__2__7__8__4__count", "metric__2__7__8__4__1_col_0", - "aggr__2__7__8__4__3__parent_count", "aggr__2__7__8__4__3__key_0", - "aggr__2__7__8__4__3__count", "metric__2__7__8__4__3__1_col_0", - "metric__2__7__8__4__3__5_col_0", "metric__2__7__8__4__3__6_col_0", - dense_rank() 
OVER (ORDER BY "metric__2__1_col_0" DESC, "aggr__2__key_0" ASC) - AS "aggr__2__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY - "aggr__2__7__key_0" ASC) AS "aggr__2__7__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0", "aggr__2__7__key_0" ORDER - BY "aggr__2__7__8__count" DESC, "aggr__2__7__8__key_0" ASC) AS - "aggr__2__7__8__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0", "aggr__2__7__key_0", - "aggr__2__7__8__key_0" ORDER BY "metric__2__7__8__4__1_col_0" DESC, - "aggr__2__7__8__4__key_0" ASC) AS "aggr__2__7__8__4__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0", "aggr__2__7__key_0", - "aggr__2__7__8__key_0", "aggr__2__7__8__4__key_0" ORDER BY - "metric__2__7__8__4__3__6_col_0" ASC, "aggr__2__7__8__4__3__key_0" ASC) AS - "aggr__2__7__8__4__3__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__2__parent_count", - "surname" AS "aggr__2__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - sumOrNull(sumOrNull("total")) OVER (PARTITION BY "aggr__2__key_0") AS - "metric__2__1_col_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS - "aggr__2__7__parent_count", - COALESCE("limbName", '__missing__') AS "aggr__2__7__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0", "aggr__2__7__key_0") AS - "aggr__2__7__count", - sumOrNull(sumOrNull("total")) OVER (PARTITION BY "aggr__2__key_0", - "aggr__2__7__key_0") AS "metric__2__7__1_col_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0", "aggr__2__7__key_0") AS - "aggr__2__7__8__parent_count", - COALESCE("organName", '__missing__') AS "aggr__2__7__8__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0", "aggr__2__7__key_0", - "aggr__2__7__8__key_0") AS "aggr__2__7__8__count", - sumOrNull(sumOrNull("total")) OVER (PARTITION BY "aggr__2__key_0", - "aggr__2__7__key_0", "aggr__2__7__8__key_0") AS "metric__2__7__8__1_col_0" - , - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0", 
"aggr__2__7__key_0", - "aggr__2__7__8__key_0") AS "aggr__2__7__8__4__parent_count", - "doctorName" AS "aggr__2__7__8__4__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0", "aggr__2__7__key_0", - "aggr__2__7__8__key_0", "aggr__2__7__8__4__key_0") AS - "aggr__2__7__8__4__count", - sumOrNull(sumOrNull("total")) OVER (PARTITION BY "aggr__2__key_0", - "aggr__2__7__key_0", "aggr__2__7__8__key_0", "aggr__2__7__8__4__key_0") AS - "metric__2__7__8__4__1_col_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0", "aggr__2__7__key_0", - "aggr__2__7__8__key_0", "aggr__2__7__8__4__key_0") AS - "aggr__2__7__8__4__3__parent_count", - "height" AS "aggr__2__7__8__4__3__key_0", - count(*) AS "aggr__2__7__8__4__3__count", - sumOrNull("total") AS "metric__2__7__8__4__3__1_col_0", - sumOrNull("some") AS "metric__2__7__8__4__3__5_col_0", - sumOrNull("cost") AS "metric__2__7__8__4__3__6_col_0" - FROM __quesma_table_name - GROUP BY "surname" AS "aggr__2__key_0", - COALESCE("limbName", '__missing__') AS "aggr__2__7__key_0", - COALESCE("organName", '__missing__') AS "aggr__2__7__8__key_0", - "doctorName" AS "aggr__2__7__8__4__key_0", - "height" AS "aggr__2__7__8__4__3__key_0")) - WHERE (((("aggr__2__order_1_rank"<=101 AND "aggr__2__7__order_1_rank"<=10) AND - "aggr__2__7__8__order_1_rank"<=10) AND "aggr__2__7__8__4__order_1_rank"<=7) - AND "aggr__2__7__8__4__3__order_1_rank"<=2) - ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__7__order_1_rank" ASC, - "aggr__2__7__8__order_1_rank" ASC, "aggr__2__7__8__4__order_1_rank" ASC, - "aggr__2__7__8__4__3__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n `metric__2__1_col_0`, `aggr__2__7__parent_count`, `aggr__2__7__key_0`,\n `aggr__2__7__count`, `metric__2__7__1_col_0`, `aggr__2__7__8__parent_count`,\n `aggr__2__7__8__key_0`, `aggr__2__7__8__count`, `metric__2__7__8__1_col_0`,\n `aggr__2__7__8__4__parent_count`, `aggr__2__7__8__4__key_0`,\n `aggr__2__7__8__4__count`, 
`metric__2__7__8__4__1_col_0`,\n `aggr__2__7__8__4__3__parent_count`, `aggr__2__7__8__4__3__key_0`,\n `aggr__2__7__8__4__3__count`, `metric__2__7__8__4__3__1_col_0`,\n `metric__2__7__8__4__3__5_col_0`, `metric__2__7__8__4__3__6_col_0`\n FROM (\n SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n `metric__2__1_col_0`, `aggr__2__7__parent_count`, `aggr__2__7__key_0`,\n `aggr__2__7__count`, `metric__2__7__1_col_0`, `aggr__2__7__8__parent_count`,\n `aggr__2__7__8__key_0`, `aggr__2__7__8__count`, `metric__2__7__8__1_col_0`,\n `aggr__2__7__8__4__parent_count`, `aggr__2__7__8__4__key_0`,\n `aggr__2__7__8__4__count`, `metric__2__7__8__4__1_col_0`,\n `aggr__2__7__8__4__3__parent_count`, `aggr__2__7__8__4__3__key_0`,\n `aggr__2__7__8__4__3__count`, `metric__2__7__8__4__3__1_col_0`,\n `metric__2__7__8__4__3__5_col_0`, `metric__2__7__8__4__3__6_col_0`,\n dense_rank() OVER (ORDER BY `metric__2__1_col_0` DESC, `aggr__2__key_0` ASC)\n AS `aggr__2__order_1_rank`,\n dense_rank() OVER (PARTITION BY `aggr__2__key_0` ORDER BY `aggr__2__7__key_0\n ` ASC) AS `aggr__2__7__order_1_rank`,\n dense_rank() OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__7__key_0` ORDER\n BY `aggr__2__7__8__count` DESC, `aggr__2__7__8__key_0` ASC) AS `\n aggr__2__7__8__order_1_rank`,\n dense_rank() OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__7__key_0`, `\n aggr__2__7__8__key_0` ORDER BY `metric__2__7__8__4__1_col_0` DESC, `\n aggr__2__7__8__4__key_0` ASC) AS `aggr__2__7__8__4__order_1_rank`,\n dense_rank() OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__7__key_0`, `\n aggr__2__7__8__key_0`, `aggr__2__7__8__4__key_0` ORDER BY `\n metric__2__7__8__4__3__6_col_0` ASC, `aggr__2__7__8__4__3__key_0` ASC) AS `\n aggr__2__7__8__4__3__order_1_rank`\n FROM (\n SELECT sum(count(*)) OVER () AS `aggr__2__parent_count`,\n `surname` AS `aggr__2__key_0`,\n sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__count`,\n sumOrNull(sumOrNull(`total`)) OVER (PARTITION BY `aggr__2__key_0`) AS `\n 
metric__2__1_col_0`,\n sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `\n aggr__2__7__parent_count`,\n COALESCE(`limbName`, '__missing__') AS `aggr__2__7__key_0`,\n sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__7__key_0`) AS\n `aggr__2__7__count`,\n sumOrNull(sumOrNull(`total`)) OVER (PARTITION BY `aggr__2__key_0`, `\n aggr__2__7__key_0`) AS `metric__2__7__1_col_0`,\n sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__7__key_0`) AS\n `aggr__2__7__8__parent_count`,\n COALESCE(`organName`, '__missing__') AS `aggr__2__7__8__key_0`,\n sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__7__key_0`, `\n aggr__2__7__8__key_0`) AS `aggr__2__7__8__count`,\n sumOrNull(sumOrNull(`total`)) OVER (PARTITION BY `aggr__2__key_0`, `\n aggr__2__7__key_0`, `aggr__2__7__8__key_0`) AS `metric__2__7__8__1_col_0`,\n sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__7__key_0`, `\n aggr__2__7__8__key_0`) AS `aggr__2__7__8__4__parent_count`,\n `doctorName` AS `aggr__2__7__8__4__key_0`,\n sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__7__key_0`, `\n aggr__2__7__8__key_0`, `aggr__2__7__8__4__key_0`) AS `\n aggr__2__7__8__4__count`,\n sumOrNull(sumOrNull(`total`)) OVER (PARTITION BY `aggr__2__key_0`, `\n aggr__2__7__key_0`, `aggr__2__7__8__key_0`, `aggr__2__7__8__4__key_0`) AS\n `metric__2__7__8__4__1_col_0`,\n sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`, `aggr__2__7__key_0`, `\n aggr__2__7__8__key_0`, `aggr__2__7__8__4__key_0`) AS `\n aggr__2__7__8__4__3__parent_count`,\n `height` AS `aggr__2__7__8__4__3__key_0`,\n count(*) AS `aggr__2__7__8__4__3__count`,\n sumOrNull(`total`) AS `metric__2__7__8__4__3__1_col_0`,\n sumOrNull(`some`) AS `metric__2__7__8__4__3__5_col_0`,\n sumOrNull(`cost`) AS `metric__2__7__8__4__3__6_col_0`\n FROM `__quesma_table_name`\n GROUP BY `surname` AS `aggr__2__key_0`,\n COALESCE(`limbName`, '__missing__') AS `aggr__2__7__key_0`,\n COALESCE(`organName`, '__missing__') AS `aggr__2__7__8__key_0`,\n 
`doctorName` AS `aggr__2__7__8__4__key_0`,\n `height` AS `aggr__2__7__8__4__3__key_0`))\n WHERE ((((`aggr__2__order_1_rank`<=101 AND `aggr__2__7__order_1_rank`<=10) AND `\n aggr__2__7__8__order_1_rank`<=10) AND `aggr__2__7__8__4__order_1_rank`<=7) AND\n `aggr__2__7__8__4__3__order_1_rank`<=2)\n ORDER BY `aggr__2__order_1_rank` ASC, `aggr__2__7__order_1_rank` ASC,\n `aggr__2__7__8__order_1_rank` ASC, `aggr__2__7__8__4__order_1_rank` ASC,\n `aggr__2__7__8__4__3__order_1_rank` ASC", }, } diff --git a/platform/testdata/clients/turing.go b/platform/testdata/clients/turing.go index 8534c612b..d324e65cd 100644 --- a/platform/testdata/clients/turing.go +++ b/platform/testdata/clients/turing.go @@ -45,30 +45,28 @@ var TuringTests = []testdata.AggregationTestCase{ } }`, ExpectedPancakeResults: []model.QueryResultRow{}, - ExpectedPancakeSQL: ` - SELECT "aggr__2__key_0", "aggr__2__count", "aggr__2__3__parent_count", - "aggr__2__3__key_0", "aggr__2__3__count" - FROM ( - SELECT "aggr__2__key_0", "aggr__2__count", "aggr__2__3__parent_count", - "aggr__2__3__key_0", "aggr__2__3__count", - dense_rank() OVER (ORDER BY "aggr__2__key_0" ASC) AS "aggr__2__order_1_rank" - , - dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY - "aggr__2__3__count" DESC, "aggr__2__3__key_0" ASC) AS - "aggr__2__3__order_1_rank" - FROM ( - SELECT toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset( - toTimezone("@timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS - "aggr__2__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS - "aggr__2__3__parent_count", "score" AS "aggr__2__3__key_0", - count(*) AS "aggr__2__3__count" - FROM __quesma_table_name - GROUP BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset( - toTimezone("@timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS - "aggr__2__key_0", "score" AS "aggr__2__3__key_0")) - WHERE "aggr__2__3__order_1_rank"<=6 - ORDER BY "aggr__2__order_1_rank" 
ASC, "aggr__2__3__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__2__key_0`, `aggr__2__count`, `aggr__2__3__parent_count`,\n" + + " `aggr__2__3__key_0`, `aggr__2__3__count`\n" + + "FROM (\n" + + " SELECT `aggr__2__key_0`, `aggr__2__count`, `aggr__2__3__parent_count`,\n" + + " `aggr__2__3__key_0`, `aggr__2__3__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__2__key_0` ASC) AS `aggr__2__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0` ORDER BY\n" + + " `aggr__2__3__count` DESC, `aggr__2__3__key_0` ASC) AS\n" + + " `aggr__2__3__order_1_rank`\n" + + " FROM (\n" + + " SELECT toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(\n" + + " toTimezone(`@timestamp`, 'Europe/Warsaw'))*1000) / 43200000) AS\n" + + " `aggr__2__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS\n" + + " `aggr__2__3__parent_count`, `score` AS `aggr__2__3__key_0`,\n" + + " count(*) AS `aggr__2__3__count`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(\n" + + " toTimezone(`@timestamp`, 'Europe/Warsaw'))*1000) / 43200000) AS\n" + + " `aggr__2__key_0`, `score` AS `aggr__2__3__key_0`))\n" + + "WHERE `aggr__2__3__order_1_rank`<=6\n" + + "ORDER BY `aggr__2__order_1_rank` ASC, `aggr__2__3__order_1_rank` ASC", }, } diff --git a/platform/testdata/dashboard-1/aggregation_requests.go b/platform/testdata/dashboard-1/aggregation_requests.go index b71b79937..8afcf148a 100644 --- a/platform/testdata/dashboard-1/aggregation_requests.go +++ b/platform/testdata/dashboard-1/aggregation_requests.go @@ -182,28 +182,27 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__0__1__2_col_0", 658654099), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__0__key_0", "aggr__0__count", "aggr__0__1__key_0", - "aggr__0__1__count", "metric__0__1__2_col_0" - FROM ( - SELECT 
"aggr__0__key_0", "aggr__0__count", "aggr__0__1__key_0", - "aggr__0__1__count", "metric__0__1__2_col_0", - dense_rank() OVER (ORDER BY "aggr__0__key_0" ASC) AS "aggr__0__order_1_rank" - , - dense_rank() OVER (PARTITION BY "aggr__0__key_0" ORDER BY "aggr__0__key_0" - ASC, "aggr__0__1__key_0" ASC) AS "aggr__0__1__order_1_rank" - FROM ( - SELECT floor("rspContentLen"/2e+06)*2e+06 AS "aggr__0__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS "aggr__0__count", - floor("rspContentLen"/2e+06)*2e+06 AS "aggr__0__1__key_0", - count(*) AS "aggr__0__1__count", - avgOrNull("rspContentLen") AS "metric__0__1__2_col_0" - FROM __quesma_table_name - WHERE ("reqTimeSec">='2024-04-24T10:55:23.606Z' AND "reqTimeSec"<= - '2024-04-24T11:10:23.606Z') - GROUP BY floor("rspContentLen"/2e+06)*2e+06 AS "aggr__0__key_0", - floor("rspContentLen"/2e+06)*2e+06 AS "aggr__0__1__key_0")) - ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__1__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__0__key_0`, `aggr__0__count`, `aggr__0__1__key_0`,\n" + + " `aggr__0__1__count`, `metric__0__1__2_col_0`\n" + + "FROM (\n" + + " SELECT `aggr__0__key_0`, `aggr__0__count`, `aggr__0__1__key_0`,\n" + + " `aggr__0__1__count`, `metric__0__1__2_col_0`,\n" + + " dense_rank() OVER (ORDER BY `aggr__0__key_0` ASC) AS `aggr__0__order_1_rank`\n" + + " ,\n" + + " dense_rank() OVER (PARTITION BY `aggr__0__key_0` ORDER BY `aggr__0__key_0`\n" + + " ASC, `aggr__0__1__key_0` ASC) AS `aggr__0__1__order_1_rank`\n" + + " FROM (\n" + + " SELECT floor(`rspContentLen`/2e+06)*2e+06 AS `aggr__0__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS `aggr__0__count`,\n" + + " floor(`rspContentLen`/2e+06)*2e+06 AS `aggr__0__1__key_0`,\n" + + " count(*) AS `aggr__0__1__count`,\n" + + " avgOrNull(`rspContentLen`) AS `metric__0__1__2_col_0`\n" + + " FROM `__quesma_table_name`\n" + + " WHERE (`reqTimeSec`>='2024-04-24T10:55:23.606Z' AND `reqTimeSec`<= \n" + + " '2024-04-24T11:10:23.606Z')\n" + + " GROUP BY 
floor(`rspContentLen`/2e+06)*2e+06 AS `aggr__0__key_0`,\n" + + " floor(`rspContentLen`/2e+06)*2e+06 AS `aggr__0__1__key_0`))\n" + + "ORDER BY `aggr__0__order_1_rank` ASC, `aggr__0__1__order_1_rank` ASC", }, { // [1] TestName: "dashboard-1: bug, used to be infinite loop", @@ -441,30 +440,29 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__0__1__2_col_0", []float64{83.8}), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__0__key_0", "aggr__0__count", "aggr__0__1__key_0", - "aggr__0__1__count", "metric__0__1__2_col_0" - FROM ( - SELECT "aggr__0__key_0", "aggr__0__count", "aggr__0__1__key_0", - "aggr__0__1__count", "metric__0__1__2_col_0", - dense_rank() OVER (ORDER BY "aggr__0__key_0" ASC) AS "aggr__0__order_1_rank" - , - dense_rank() OVER (PARTITION BY "aggr__0__key_0" ORDER BY - "aggr__0__1__key_0" ASC) AS "aggr__0__1__order_1_rank" - FROM ( - SELECT toInt64((toUnixTimestamp64Milli("reqTimeSec")+timeZoneOffset( - toTimezone("reqTimeSec", 'Europe/Warsaw'))*1000) / 30000) AS - "aggr__0__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS "aggr__0__count", - floor("billingRegion"/0.5)*0.5 AS "aggr__0__1__key_0", - count(*) AS "aggr__0__1__count", - quantiles(0.950000)("latency") AS "metric__0__1__2_col_0" - FROM __quesma_table_name - WHERE ("reqTimeSec">='2024-04-24T11:15:46.279Z' AND "reqTimeSec"<= - '2024-04-24T11:30:46.279Z') - GROUP BY toInt64((toUnixTimestamp64Milli("reqTimeSec")+timeZoneOffset( - toTimezone("reqTimeSec", 'Europe/Warsaw'))*1000) / 30000) AS - "aggr__0__key_0", floor("billingRegion"/0.5)*0.5 AS "aggr__0__1__key_0")) - ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__1__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__0__key_0`, `aggr__0__count`, `aggr__0__1__key_0`,\n" + + " `aggr__0__1__count`, `metric__0__1__2_col_0`\n" + + "FROM (\n" + + " SELECT `aggr__0__key_0`, `aggr__0__count`, `aggr__0__1__key_0`,\n" + + " `aggr__0__1__count`, `metric__0__1__2_col_0`,\n" + + " dense_rank() OVER 
(ORDER BY `aggr__0__key_0` ASC) AS `aggr__0__order_1_rank`\n" + + " ,\n" + + " dense_rank() OVER (PARTITION BY `aggr__0__key_0` ORDER BY\n" + + " `aggr__0__1__key_0` ASC) AS `aggr__0__1__order_1_rank`\n" + + " FROM (\n" + + " SELECT toInt64((toUnixTimestamp64Milli(`reqTimeSec`)+timeZoneOffset(\n" + + " toTimezone(`reqTimeSec`, 'Europe/Warsaw'))*1000) / 30000) AS\n" + + " `aggr__0__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS `aggr__0__count`,\n" + + " floor(`billingRegion`/0.5)*0.5 AS `aggr__0__1__key_0`,\n" + + " count(*) AS `aggr__0__1__count`,\n" + + " quantiles(0.950000)(`latency`) AS `metric__0__1__2_col_0`\n" + + " FROM `__quesma_table_name`\n" + + " WHERE (`reqTimeSec`>='2024-04-24T11:15:46.279Z' AND `reqTimeSec`<= \n" + + " '2024-04-24T11:30:46.279Z')\n" + + " GROUP BY toInt64((toUnixTimestamp64Milli(`reqTimeSec`)+timeZoneOffset(\n" + + " toTimezone(`reqTimeSec`, 'Europe/Warsaw'))*1000) / 30000) AS\n" + + " `aggr__0__key_0`, floor(`billingRegion`/0.5)*0.5 AS `aggr__0__1__key_0`))\n" + + "ORDER BY `aggr__0__order_1_rank` ASC, `aggr__0__1__order_1_rank` ASC", }, } diff --git a/platform/testdata/dates.go b/platform/testdata/dates.go index 301c89c4a..d597f0aea 100644 --- a/platform/testdata/dates.go +++ b/platform/testdata/dates.go @@ -105,18 +105,17 @@ var AggregationTestsWithDates = []AggregationTestCase{ model.NewQueryResultCol("aggr__sampler__eventRate__count", int64(1)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__sampler__count", - toInt64(toUnixTimestamp(toStartOfWeek(toTimezone("order_date", 'UTC'))))*1000 - AS "aggr__sampler__eventRate__key_0", - count(*) AS "aggr__sampler__eventRate__count" - FROM ( - SELECT "order_date" - FROM __quesma_table_name - LIMIT 20000) - GROUP BY toInt64(toUnixTimestamp(toStartOfWeek(toTimezone("order_date", 'UTC'))) - )*1000 AS "aggr__sampler__eventRate__key_0" - ORDER BY "aggr__sampler__eventRate__key_0" ASC`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS 
`aggr__sampler__count`,\n" + + " toInt64(toUnixTimestamp(toStartOfWeek(toTimezone(`order_date`, 'UTC'))))*1000\n" + + " AS `aggr__sampler__eventRate__key_0`,\n" + + " count(*) AS `aggr__sampler__eventRate__count`\n" + + "FROM (\n" + + " SELECT `order_date`\n" + + " FROM `__quesma_table_name`\n" + + " LIMIT 20000)\n" + + "GROUP BY toInt64(toUnixTimestamp(toStartOfWeek(toTimezone(`order_date`, 'UTC')))\n" + + " )*1000 AS `aggr__sampler__eventRate__key_0`\n" + + "ORDER BY `aggr__sampler__eventRate__key_0` ASC", }, { // [1] TestName: "extended_bounds pre keys (timezone calculations most tricky to get right)", @@ -312,16 +311,15 @@ var AggregationTestsWithDates = []AggregationTestCase{ model.NewQueryResultCol("aggr__timeseries__count", int64(1)), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( - "@timestamp", 'Europe/Warsaw'))*1000) / 10000) AS "aggr__timeseries__key_0", - count(*) AS "aggr__timeseries__count" - FROM __quesma_table_name - WHERE ("@timestamp">=fromUnixTimestamp64Milli(1730370296174) AND "@timestamp"<= - fromUnixTimestamp64Milli(1730370596174)) - GROUP BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone - ("@timestamp", 'Europe/Warsaw'))*1000) / 10000) AS "aggr__timeseries__key_0" - ORDER BY "aggr__timeseries__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(toTimezone(\n" + + " `@timestamp`, 'Europe/Warsaw'))*1000) / 10000) AS `aggr__timeseries__key_0`,\n" + + " count(*) AS `aggr__timeseries__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`@timestamp`>=fromUnixTimestamp64Milli(1730370296174) AND `@timestamp`<= \n" + + " fromUnixTimestamp64Milli(1730370596174))\n" + + "GROUP BY toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(toTimezone\n" + + " (`@timestamp`, 'Europe/Warsaw'))*1000) / 10000) AS `aggr__timeseries__key_0`\n" + + "ORDER BY `aggr__timeseries__key_0` ASC", }, { // [2] TestName: 
"extended_bounds post keys (timezone calculations most tricky to get right)", @@ -493,16 +491,15 @@ var AggregationTestsWithDates = []AggregationTestCase{ model.NewQueryResultCol("aggr__timeseries__count", int64(1)), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( - "@timestamp", 'Europe/Warsaw'))*1000) / 10000) AS "aggr__timeseries__key_0", - count(*) AS "aggr__timeseries__count" - FROM __quesma_table_name - WHERE ("@timestamp">=fromUnixTimestamp64Milli(1730370296174) AND "@timestamp"<= - fromUnixTimestamp64Milli(1730370596174)) - GROUP BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone - ("@timestamp", 'Europe/Warsaw'))*1000) / 10000) AS "aggr__timeseries__key_0" - ORDER BY "aggr__timeseries__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(toTimezone(\n" + + " `@timestamp`, 'Europe/Warsaw'))*1000) / 10000) AS `aggr__timeseries__key_0`,\n" + + " count(*) AS `aggr__timeseries__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`@timestamp`>=fromUnixTimestamp64Milli(1730370296174) AND `@timestamp`<= \n" + + " fromUnixTimestamp64Milli(1730370596174))\n" + + "GROUP BY toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(toTimezone\n" + + " (`@timestamp`, 'Europe/Warsaw'))*1000) / 10000) AS `aggr__timeseries__key_0`\n" + + "ORDER BY `aggr__timeseries__key_0` ASC", }, { // [3] TestName: "empty results, we still should add empty buckets, because of the extended_bounds and min_doc_count defaulting to 0", @@ -622,17 +619,16 @@ var AggregationTestsWithDates = []AggregationTestCase{ "start_time_in_millis": 1707486436397 }`, ExpectedPancakeResults: []model.QueryResultRow{}, - ExpectedPancakeSQL: ` - SELECT toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( - "@timestamp", 'Europe/Warsaw'))*1000) / 86400000) AS "aggr__0__key_0", - count(*) AS "aggr__0__count", - sumOrNull("body_bytes_sent") AS 
"metric__0__1_col_0" - FROM __quesma_table_name - WHERE ("@timestamp">=fromUnixTimestamp64Milli(1259327903466) AND "@timestamp"<= - fromUnixTimestamp64Milli(1732713503466)) - GROUP BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone - ("@timestamp", 'Europe/Warsaw'))*1000) / 86400000) AS "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(toTimezone(\n" + + " `@timestamp`, 'Europe/Warsaw'))*1000) / 86400000) AS `aggr__0__key_0`,\n" + + " count(*) AS `aggr__0__count`,\n" + + " sumOrNull(`body_bytes_sent`) AS `metric__0__1_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`@timestamp`>=fromUnixTimestamp64Milli(1259327903466) AND `@timestamp`<= \n" + + " fromUnixTimestamp64Milli(1732713503466))\n" + + "GROUP BY toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(toTimezone\n" + + " (`@timestamp`, 'Europe/Warsaw'))*1000) / 86400000) AS `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__key_0` ASC", }, { // [4] TestName: "date_histogram add in-between rows, calendar_interval: >= month (regression test)", @@ -706,13 +702,12 @@ var AggregationTestsWithDates = []AggregationTestCase{ model.NewQueryResultCol("aggr__sales_per_month__count", int64(2)), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp(toStartOfMonth(toTimezone("date", 'UTC'))))*1000 - AS "aggr__sales_per_month__key_0", count(*) AS "aggr__sales_per_month__count" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp(toStartOfMonth(toTimezone("date", 'UTC'))))* - 1000 AS "aggr__sales_per_month__key_0" - ORDER BY "aggr__sales_per_month__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64(toUnixTimestamp(toStartOfMonth(toTimezone(`date`, 'UTC'))))*1000\n" + + " AS `aggr__sales_per_month__key_0`, count(*) AS `aggr__sales_per_month__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY toInt64(toUnixTimestamp(toStartOfMonth(toTimezone(`date`, 'UTC'))))*\n" + + " 1000 AS 
`aggr__sales_per_month__key_0`\n" + + "ORDER BY `aggr__sales_per_month__key_0` ASC", }, { // [5] TestName: "date_histogram add in-between rows, calendar_interval: >= month (regression test)", @@ -762,14 +757,13 @@ var AggregationTestsWithDates = []AggregationTestCase{ model.NewQueryResultCol("aggr__sales_per_quarter__count", int64(2)), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp(toStartOfQuarter(toTimezone("date", 'UTC'))))* - 1000 AS "aggr__sales_per_quarter__key_0", - count(*) AS "aggr__sales_per_quarter__count" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp(toStartOfQuarter(toTimezone("date", 'UTC'))))* - 1000 AS "aggr__sales_per_quarter__key_0" - ORDER BY "aggr__sales_per_quarter__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64(toUnixTimestamp(toStartOfQuarter(toTimezone(`date`, 'UTC'))))*\n" + + " 1000 AS `aggr__sales_per_quarter__key_0`,\n" + + " count(*) AS `aggr__sales_per_quarter__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY toInt64(toUnixTimestamp(toStartOfQuarter(toTimezone(`date`, 'UTC'))))*\n" + + " 1000 AS `aggr__sales_per_quarter__key_0`\n" + + "ORDER BY `aggr__sales_per_quarter__key_0` ASC", }, { // [6] TestName: "date_histogram add in-between rows, calendar_interval: >= month (regression test)", @@ -819,14 +813,13 @@ var AggregationTestsWithDates = []AggregationTestCase{ model.NewQueryResultCol("aggr__sales_per_year__count", int64(2)), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp(toStartOfYear(toTimezone("date", 'UTC'))))*1000 - AS "aggr__sales_per_year__key_0", - count(*) AS "aggr__sales_per_year__count" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp(toStartOfYear(toTimezone("date", 'UTC'))))*1000 - AS "aggr__sales_per_year__key_0" - ORDER BY "aggr__sales_per_year__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64(toUnixTimestamp(toStartOfYear(toTimezone(`date`, 'UTC'))))*1000\n" + + " AS `aggr__sales_per_year__key_0`,\n" + + " count(*) AS 
`aggr__sales_per_year__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY toInt64(toUnixTimestamp(toStartOfYear(toTimezone(`date`, 'UTC'))))*1000\n" + + " AS `aggr__sales_per_year__key_0`\n" + + "ORDER BY `aggr__sales_per_year__key_0` ASC", }, { // [7] TestName: "turing 1 - painless script in terms", @@ -941,30 +934,29 @@ var AggregationTestsWithDates = []AggregationTestCase{ model.NewQueryResultCol("aggr__1__2__count", int64(6844)), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__1__key_0", "aggr__1__count", "aggr__1__2__parent_count", - "aggr__1__2__key_0", "aggr__1__2__count" - FROM ( - SELECT "aggr__1__key_0", "aggr__1__count", "aggr__1__2__parent_count", - "aggr__1__2__key_0", "aggr__1__2__count", - dense_rank() OVER (ORDER BY "aggr__1__key_0" ASC) AS "aggr__1__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__1__key_0" ORDER BY - "aggr__1__2__count" DESC, "aggr__1__2__key_0" ASC) AS - "aggr__1__2__order_1_rank" - FROM ( - SELECT toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset( - toTimezone("@timestamp", 'Europe/Warsaw'))*1000) / 2592000000) AS - "aggr__1__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__1__key_0") AS "aggr__1__count", - sum(count(*)) OVER (PARTITION BY "aggr__1__key_0") AS - "aggr__1__2__parent_count", - "request_id"="origin_request_id" AS "aggr__1__2__key_0", - count(*) AS "aggr__1__2__count" - FROM __quesma_table_name - GROUP BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset( - toTimezone("@timestamp", 'Europe/Warsaw'))*1000) / 2592000000) AS - "aggr__1__key_0", "request_id"="origin_request_id" AS "aggr__1__2__key_0")) - WHERE "aggr__1__2__order_1_rank"<=6 - ORDER BY "aggr__1__order_1_rank" ASC, "aggr__1__2__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__1__key_0`, `aggr__1__count`, `aggr__1__2__parent_count`,\n" + + " `aggr__1__2__key_0`, `aggr__1__2__count`\n" + + "FROM (\n" + + " SELECT `aggr__1__key_0`, `aggr__1__count`, `aggr__1__2__parent_count`,\n" + + " `aggr__1__2__key_0`, 
`aggr__1__2__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__1__key_0` ASC) AS `aggr__1__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__1__key_0` ORDER BY\n" + + " `aggr__1__2__count` DESC, `aggr__1__2__key_0` ASC) AS\n" + + " `aggr__1__2__order_1_rank`\n" + + " FROM (\n" + + " SELECT toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(\n" + + " toTimezone(`@timestamp`, 'Europe/Warsaw'))*1000) / 2592000000) AS\n" + + " `aggr__1__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__1__key_0`) AS `aggr__1__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__1__key_0`) AS\n" + + " `aggr__1__2__parent_count`,\n" + + " `request_id`=`origin_request_id` AS `aggr__1__2__key_0`,\n" + + " count(*) AS `aggr__1__2__count`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(\n" + + " toTimezone(`@timestamp`, 'Europe/Warsaw'))*1000) / 2592000000) AS\n" + + " `aggr__1__key_0`, `request_id`=`origin_request_id` AS `aggr__1__2__key_0`))\n" + + "WHERE `aggr__1__2__order_1_rank`<=6\n" + + "ORDER BY `aggr__1__order_1_rank` ASC, `aggr__1__2__order_1_rank` ASC", }, } diff --git a/platform/testdata/facets_requests.go b/platform/testdata/facets_requests.go index 834ea9a4d..c37dbf4c1 100644 --- a/platform/testdata/facets_requests.go +++ b/platform/testdata/facets_requests.go @@ -132,25 +132,24 @@ var TestsNumericFacets = []struct { }, "start_time_in_millis": 0 }`, - ExpectedSql: ` -SELECT sum(count(*)) OVER () AS "metric____quesma_total_count_col_0", - sum(count(*)) OVER () AS "aggr__sample__count", - maxOrNull(maxOrNull("int64-field")) OVER () AS - "metric__sample__max_value_col_0", - minOrNull(minOrNull("int64-field")) OVER () AS - "metric__sample__min_value_col_0", - sum(count("int64-field")) OVER () AS "metric__sample__sample_count_col_0", - sum(count(*)) OVER () AS "aggr__sample__top_values__parent_count", - "int64-field" AS "aggr__sample__top_values__key_0", - count(*) AS 
"aggr__sample__top_values__count" -FROM ( - SELECT "int64-field" - FROM __quesma_table_name - LIMIT 20000) -GROUP BY "int64-field" AS "aggr__sample__top_values__key_0" -ORDER BY "aggr__sample__top_values__count" DESC, - "aggr__sample__top_values__key_0" ASC -LIMIT 11`, + ExpectedSql: "SELECT sum(count(*)) OVER () AS `metric____quesma_total_count_col_0`,\n" + + " sum(count(*)) OVER () AS `aggr__sample__count`,\n" + + " maxOrNull(maxOrNull(`int64-field`)) OVER () AS\n" + + " `metric__sample__max_value_col_0`,\n" + + " minOrNull(minOrNull(`int64-field`)) OVER () AS\n" + + " `metric__sample__min_value_col_0`,\n" + + " sum(count(`int64-field`)) OVER () AS `metric__sample__sample_count_col_0`,\n" + + " sum(count(*)) OVER () AS `aggr__sample__top_values__parent_count`,\n" + + " `int64-field` AS `aggr__sample__top_values__key_0`,\n" + + " count(*) AS `aggr__sample__top_values__count`\n" + + "FROM (\n" + + " SELECT `int64-field`\n" + + " FROM `__quesma_table_name`\n" + + " LIMIT 20000)\n" + + "GROUP BY `int64-field` AS `aggr__sample__top_values__key_0`\n" + + "ORDER BY `aggr__sample__top_values__count` DESC,\n" + + " `aggr__sample__top_values__key_0` ASC\n" + + "LIMIT 11", NewResultRows: []model.QueryResultRow{ {Cols: []model.QueryResultCol{ model.NewQueryResultCol("metric____quesma_total_count_col_0", 2693), diff --git a/platform/testdata/full_search_requests.go b/platform/testdata/full_search_requests.go index 8b6d41f55..f8313b747 100644 --- a/platform/testdata/full_search_requests.go +++ b/platform/testdata/full_search_requests.go @@ -8,13 +8,13 @@ import ( ) func selectCnt(limit int) string { - return fmt.Sprintf(`SELECT count(*) AS "column_0" FROM (SELECT 1 FROM %s LIMIT %d)`, TableName, limit) + return fmt.Sprintf("SELECT count(*) AS `column_0` FROM (SELECT 1 FROM `%s` LIMIT %d)", TableName, limit) } func selectTotalCnt() string { - return fmt.Sprintf(`SELECT count(*) AS "column_0" FROM %s`, TableName) + return fmt.Sprintf("SELECT count(*) AS `column_0` FROM `%s`", 
TableName) } func selectStar(limit int) string { - return fmt.Sprintf("SELECT \"@timestamp\", \"message\" FROM %s LIMIT %d", TableName, limit) + return fmt.Sprintf("SELECT `@timestamp`, `message` FROM `%s` LIMIT %d", TableName, limit) } func resultCount(cnt int) []model.QueryResultRow { @@ -423,31 +423,31 @@ var FullSearchRequests = []FullSearchTestCase{ } }`, ExpectedSQLs: []string{ - `SELECT "metric____quesma_total_count_col_0", "aggr__2__key_0", "aggr__2__count", - "aggr__2__3__parent_count", "aggr__2__3__key_0", "aggr__2__3__count" - FROM ( - SELECT "metric____quesma_total_count_col_0", "aggr__2__key_0", - "aggr__2__count", "aggr__2__3__parent_count", "aggr__2__3__key_0", - "aggr__2__3__count", - dense_rank() OVER (ORDER BY "aggr__2__key_0" ASC) AS "aggr__2__order_1_rank" - , - dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY - "aggr__2__3__count" DESC, "aggr__2__3__key_0" ASC) AS - "aggr__2__3__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "metric____quesma_total_count_col_0", - toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( - "@timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS "aggr__2__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS - "aggr__2__3__parent_count", NULL AS "aggr__2__3__key_0", - count(*) AS "aggr__2__3__count" - FROM __quesma_table_name - GROUP BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset( - toTimezone("@timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS - "aggr__2__key_0", NULL AS "aggr__2__3__key_0")) - WHERE "aggr__2__3__order_1_rank"<=6 - ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__3__order_1_rank" ASC`, + "SELECT `metric____quesma_total_count_col_0`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `aggr__2__3__parent_count`, `aggr__2__3__key_0`, `aggr__2__3__count`\n" + + "FROM (\n" + + " SELECT `metric____quesma_total_count_col_0`, `aggr__2__key_0`,\n" + + " `aggr__2__count`, 
`aggr__2__3__parent_count`, `aggr__2__3__key_0`,\n" + + " `aggr__2__3__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__2__key_0` ASC) AS `aggr__2__order_1_rank`\n" + + " ,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0` ORDER BY\n" + + " `aggr__2__3__count` DESC, `aggr__2__3__key_0` ASC) AS\n" + + " `aggr__2__3__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `metric____quesma_total_count_col_0`,\n" + + " toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(toTimezone(\n" + + " `@timestamp`, 'Europe/Warsaw'))*1000) / 43200000) AS `aggr__2__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS\n" + + " `aggr__2__3__parent_count`, NULL AS `aggr__2__3__key_0`,\n" + + " count(*) AS `aggr__2__3__count`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(\n" + + " toTimezone(`@timestamp`, 'Europe/Warsaw'))*1000) / 43200000) AS\n" + + " `aggr__2__key_0`, NULL AS `aggr__2__3__key_0`))\n" + + "WHERE `aggr__2__3__order_1_rank`<=6\n" + + "ORDER BY `aggr__2__order_1_rank` ASC, `aggr__2__3__order_1_rank` ASC", }, ExpectedSQLResults: [][]model.QueryResultRow{{}}, }, @@ -485,13 +485,13 @@ var FullSearchRequests = []FullSearchTestCase{ } }`, ExpectedSQLs: []string{ - `SELECT sum(count(*)) OVER () AS "metric____quesma_total_count_col_0", - sum(count(*)) OVER () AS "aggr__2__parent_count", NULL AS "aggr__2__key_0", - count(*) AS "aggr__2__count" - FROM __quesma_table_name - GROUP BY NULL AS "aggr__2__key_0" - ORDER BY "aggr__2__count" DESC, "aggr__2__key_0" ASC - LIMIT 21`, + "SELECT sum(count(*)) OVER () AS `metric____quesma_total_count_col_0`,\n" + + " sum(count(*)) OVER () AS `aggr__2__parent_count`, NULL AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY NULL AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__count` DESC, 
`aggr__2__key_0` ASC\n" + + "LIMIT 21", }, ExpectedSQLResults: [][]model.QueryResultRow{{}}, }, diff --git a/platform/testdata/geo.go b/platform/testdata/geo.go index 9ca1dc336..2eebd05d6 100644 --- a/platform/testdata/geo.go +++ b/platform/testdata/geo.go @@ -73,21 +73,20 @@ var AggregationTestsWithGeographicalCoordinates = []AggregationTestCase{ model.NewQueryResultCol("aggr__large-grid__count", 283), }}, }, - ExpectedPancakeSQL: ` - SELECT FLOOR(((__quesma_geo_lon("OriginLocation")+180)/360)*POWER(2, 8)) AS - "aggr__large-grid__key_0", - FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat("OriginLocation")))+(1/COS(RADIANS( - __quesma_geo_lat("OriginLocation")))))/PI())/2*POWER(2, 8)) AS - "aggr__large-grid__key_1", count(*) AS "aggr__large-grid__count" - FROM __quesma_table_name - GROUP BY FLOOR(((__quesma_geo_lon("OriginLocation")+180)/360)*POWER(2, 8)) AS - "aggr__large-grid__key_0", - FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat("OriginLocation")))+(1/COS(RADIANS( - __quesma_geo_lat("OriginLocation")))))/PI())/2*POWER(2, 8)) AS - "aggr__large-grid__key_1" - ORDER BY "aggr__large-grid__count" DESC, "aggr__large-grid__key_0" ASC, - "aggr__large-grid__key_1" ASC - LIMIT 10000`, + ExpectedPancakeSQL: "SELECT FLOOR(((__quesma_geo_lon(`OriginLocation`)+180)/360)*POWER(2, 8)) AS\n" + + " `aggr__large-grid__key_0`,\n" + + " FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat(`OriginLocation`)))+(1/COS(RADIANS(\n" + + " __quesma_geo_lat(`OriginLocation`)))))/PI())/2*POWER(2, 8)) AS\n" + + " `aggr__large-grid__key_1`, count(*) AS `aggr__large-grid__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY FLOOR(((__quesma_geo_lon(`OriginLocation`)+180)/360)*POWER(2, 8)) AS\n" + + " `aggr__large-grid__key_0`,\n" + + " FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat(`OriginLocation`)))+(1/COS(RADIANS(\n" + + " __quesma_geo_lat(`OriginLocation`)))))/PI())/2*POWER(2, 8)) AS\n" + + " `aggr__large-grid__key_1`\n" + + "ORDER BY `aggr__large-grid__count` DESC, `aggr__large-grid__key_0` ASC,\n" + + " 
`aggr__large-grid__key_1` ASC\n" + + "LIMIT 10000", }, { // [1] TestName: "geotile_grid with size", @@ -158,20 +157,19 @@ var AggregationTestsWithGeographicalCoordinates = []AggregationTestCase{ model.NewQueryResultCol("aggr__large-grid__count", 283), }}, }, - ExpectedPancakeSQL: ` - SELECT FLOOR(((__quesma_geo_lon("OriginLocation")+180)/360)*POWER(2, 8)) - AS "aggr__large-grid__key_0", - FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat("OriginLocation")))+(1/COS(RADIANS( - __quesma_geo_lat("OriginLocation")))))/PI())/2*POWER(2, 8)) - AS "aggr__large-grid__key_1", count(*) AS "aggr__large-grid__count" - FROM __quesma_table_name - GROUP BY FLOOR(((__quesma_geo_lon("OriginLocation")+180)/360)*POWER(2, 8)) - AS "aggr__large-grid__key_0", - FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat("OriginLocation")))+(1/COS(RADIANS( - __quesma_geo_lat("OriginLocation")))))/PI())/2*POWER(2, 8)) AS "aggr__large-grid__key_1" - ORDER BY "aggr__large-grid__count" DESC, "aggr__large-grid__key_0" ASC, - "aggr__large-grid__key_1" ASC - LIMIT 3`, + ExpectedPancakeSQL: "SELECT FLOOR(((__quesma_geo_lon(`OriginLocation`)+180)/360)*POWER(2, 8))\n" + + " AS `aggr__large-grid__key_0`,\n" + + " FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat(`OriginLocation`)))+(1/COS(RADIANS(\n" + + " __quesma_geo_lat(`OriginLocation`)))))/PI())/2*POWER(2, 8))\n" + + " AS `aggr__large-grid__key_1`, count(*) AS `aggr__large-grid__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY FLOOR(((__quesma_geo_lon(`OriginLocation`)+180)/360)*POWER(2, 8))\n" + + " AS `aggr__large-grid__key_0`,\n" + + " FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat(`OriginLocation`)))+(1/COS(RADIANS(\n" + + " __quesma_geo_lat(`OriginLocation`)))))/PI())/2*POWER(2, 8)) AS `aggr__large-grid__key_1`\n" + + "ORDER BY `aggr__large-grid__count` DESC, `aggr__large-grid__key_0` ASC,\n" + + " `aggr__large-grid__key_1` ASC\n" + + "LIMIT 3", }, { // [2] TestName: "geotile_grid with some other aggregations", @@ -296,43 +294,42 @@ var 
AggregationTestsWithGeographicalCoordinates = []AggregationTestCase{ model.NewQueryResultCol("metric__terms__large-grid__avg_col_0", 50.5), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__terms__parent_count", "aggr__terms__key_0", "aggr__terms__count", - "aggr__terms__large-grid__key_0", "aggr__terms__large-grid__key_1", - "aggr__terms__large-grid__count", "metric__terms__large-grid__avg_col_0" - FROM ( - SELECT "aggr__terms__parent_count", "aggr__terms__key_0", - "aggr__terms__count", "aggr__terms__large-grid__key_0", - "aggr__terms__large-grid__key_1", "aggr__terms__large-grid__count", - "metric__terms__large-grid__avg_col_0", - dense_rank() OVER (ORDER BY "aggr__terms__count" DESC, "aggr__terms__key_0" - ASC) AS "aggr__terms__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__terms__key_0" ORDER BY - "aggr__terms__large-grid__count" DESC, "aggr__terms__large-grid__key_0" ASC, - "aggr__terms__large-grid__key_1" ASC) AS - "aggr__terms__large-grid__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__terms__parent_count", - COALESCE("AvgTicketPrice", 'N/A') AS "aggr__terms__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__terms__key_0") AS - "aggr__terms__count", - FLOOR(((__quesma_geo_lon("OriginLocation")+180)/360)*POWER(2, 8)) AS - "aggr__terms__large-grid__key_0", - FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat("OriginLocation")))+(1/COS( - RADIANS(__quesma_geo_lat("OriginLocation")))))/PI())/2*POWER(2, 8)) AS - "aggr__terms__large-grid__key_1", - count(*) AS "aggr__terms__large-grid__count", - avgOrNull("DistanceKilometers") AS "metric__terms__large-grid__avg_col_0" - FROM __quesma_table_name - GROUP BY COALESCE("AvgTicketPrice", 'N/A') AS "aggr__terms__key_0", - FLOOR(((__quesma_geo_lon("OriginLocation")+180)/360)*POWER(2, 8)) AS - "aggr__terms__large-grid__key_0", - FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat("OriginLocation")))+(1/COS( - RADIANS(__quesma_geo_lat("OriginLocation")))))/PI())/2*POWER(2, 8)) AS - "aggr__terms__large-grid__key_1")) - 
WHERE ("aggr__terms__order_1_rank"<=2 AND - "aggr__terms__large-grid__order_1_rank"<=3) - ORDER BY "aggr__terms__order_1_rank" ASC, - "aggr__terms__large-grid__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__terms__parent_count`, `aggr__terms__key_0`, `aggr__terms__count`,\n" + + " `aggr__terms__large-grid__key_0`, `aggr__terms__large-grid__key_1`,\n" + + " `aggr__terms__large-grid__count`, `metric__terms__large-grid__avg_col_0`\n" + + "FROM (\n" + + " SELECT `aggr__terms__parent_count`, `aggr__terms__key_0`,\n" + + " `aggr__terms__count`, `aggr__terms__large-grid__key_0`,\n" + + " `aggr__terms__large-grid__key_1`, `aggr__terms__large-grid__count`,\n" + + " `metric__terms__large-grid__avg_col_0`,\n" + + " dense_rank() OVER (ORDER BY `aggr__terms__count` DESC, `aggr__terms__key_0`\n" + + " ASC) AS `aggr__terms__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__terms__key_0` ORDER BY\n" + + " `aggr__terms__large-grid__count` DESC, `aggr__terms__large-grid__key_0` ASC,\n" + + " `aggr__terms__large-grid__key_1` ASC) AS\n" + + " `aggr__terms__large-grid__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__terms__parent_count`,\n" + + " COALESCE(`AvgTicketPrice`, 'N/A') AS `aggr__terms__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__terms__key_0`) AS\n" + + " `aggr__terms__count`,\n" + + " FLOOR(((__quesma_geo_lon(`OriginLocation`)+180)/360)*POWER(2, 8)) AS\n" + + " `aggr__terms__large-grid__key_0`,\n" + + " FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat(`OriginLocation`)))+(1/COS(\n" + + " RADIANS(__quesma_geo_lat(`OriginLocation`)))))/PI())/2*POWER(2, 8)) AS\n" + + " `aggr__terms__large-grid__key_1`,\n" + + " count(*) AS `aggr__terms__large-grid__count`,\n" + + " avgOrNull(`DistanceKilometers`) AS `metric__terms__large-grid__avg_col_0`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY COALESCE(`AvgTicketPrice`, 'N/A') AS `aggr__terms__key_0`,\n" + + " FLOOR(((__quesma_geo_lon(`OriginLocation`)+180)/360)*POWER(2, 
8)) AS\n" + + " `aggr__terms__large-grid__key_0`,\n" + + " FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat(`OriginLocation`)))+(1/COS(\n" + + " RADIANS(__quesma_geo_lat(`OriginLocation`)))))/PI())/2*POWER(2, 8)) AS\n" + + " `aggr__terms__large-grid__key_1`))\n" + + "WHERE (`aggr__terms__order_1_rank`<=2 AND\n" + + " `aggr__terms__large-grid__order_1_rank`<=3)\n" + + "ORDER BY `aggr__terms__order_1_rank` ASC,\n" + + " `aggr__terms__large-grid__order_1_rank` ASC", }, } diff --git a/platform/testdata/grafana.go b/platform/testdata/grafana.go index 1a641ab2b..596ca1362 100644 --- a/platform/testdata/grafana.go +++ b/platform/testdata/grafana.go @@ -75,13 +75,12 @@ var GrafanaAggregationTests = []AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", int64(1)), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 2000) AS "aggr__2__key_0", - count(*) AS "aggr__2__count" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 2000) AS - "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64(toUnixTimestamp64Milli(`@timestamp`) / 2000) AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY toInt64(toUnixTimestamp64Milli(`@timestamp`) / 2000) AS\n" + + " `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [1] TestName: "1x terms with min_doc_count, need to erase some rows with count < min_doc_count", @@ -141,14 +140,13 @@ var GrafanaAggregationTests = []AggregationTestCase{ model.NewQueryResultCol("aggr__2__order_1", int64(0)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__2__parent_count", - "extension" AS "aggr__2__key_0", count(*) AS "aggr__2__count", - count(*)>=40 AS "aggr__2__order_1" - FROM __quesma_table_name - GROUP BY "extension" AS "aggr__2__key_0" - ORDER BY "aggr__2__order_1" DESC, "aggr__2__key_0" DESC - LIMIT 5`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS 
`aggr__2__parent_count`,\n" + + " `extension` AS `aggr__2__key_0`, count(*) AS `aggr__2__count`,\n" + + " count(*)>=40 AS `aggr__2__order_1`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY `extension` AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__order_1` DESC, `aggr__2__key_0` DESC\n" + + "LIMIT 5", }, { // [2] TestName: "2x terms with min_doc_count", @@ -271,32 +269,31 @@ var GrafanaAggregationTests = []AggregationTestCase{ model.NewQueryResultCol("aggr__2__3__count", int64(2)), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "aggr__2__order_1", "aggr__2__3__parent_count", "aggr__2__3__key_0", - "aggr__2__3__count" - FROM ( - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "aggr__2__order_1", "aggr__2__3__parent_count", "aggr__2__3__key_0", - "aggr__2__3__count", - dense_rank() OVER (ORDER BY "aggr__2__order_1" DESC, "aggr__2__key_0" DESC) - AS "aggr__2__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY - "aggr__2__3__count" DESC, "aggr__2__3__key_0" ASC) AS - "aggr__2__3__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__2__parent_count", - "extension" AS "aggr__2__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - sum(count(*)>=30) OVER (PARTITION BY "aggr__2__key_0") AS - "aggr__2__order_1", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS - "aggr__2__3__parent_count", "message" AS "aggr__2__3__key_0", - count(*) AS "aggr__2__3__count" - FROM __quesma_table_name - GROUP BY "extension" AS "aggr__2__key_0", "message" AS "aggr__2__3__key_0")) - WHERE ("aggr__2__order_1_rank"<=5 AND "aggr__2__3__order_1_rank"<=11) - ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__3__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `aggr__2__order_1`, `aggr__2__3__parent_count`, `aggr__2__3__key_0`,\n" + + " `aggr__2__3__count`\n" + + "FROM (\n" + + " 
SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `aggr__2__order_1`, `aggr__2__3__parent_count`, `aggr__2__3__key_0`,\n" + + " `aggr__2__3__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__2__order_1` DESC, `aggr__2__key_0` DESC)\n" + + " AS `aggr__2__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0` ORDER BY\n" + + " `aggr__2__3__count` DESC, `aggr__2__3__key_0` ASC) AS\n" + + " `aggr__2__3__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__2__parent_count`,\n" + + " `extension` AS `aggr__2__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__count`,\n" + + " sum(count(*)>=30) OVER (PARTITION BY `aggr__2__key_0`) AS\n" + + " `aggr__2__order_1`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS\n" + + " `aggr__2__3__parent_count`, `message` AS `aggr__2__3__key_0`,\n" + + " count(*) AS `aggr__2__3__count`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY `extension` AS `aggr__2__key_0`, `message` AS `aggr__2__3__key_0`))\n" + + "WHERE (`aggr__2__order_1_rank`<=5 AND `aggr__2__3__order_1_rank`<=11)\n" + + "ORDER BY `aggr__2__order_1_rank` ASC, `aggr__2__3__order_1_rank` ASC", }, { // [3] TestName: "simplest geotile_grid", @@ -358,13 +355,12 @@ var GrafanaAggregationTests = []AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", int64(21)), }}, }, - ExpectedPancakeSQL: ` - SELECT geohashEncode(__quesma_geo_lon("geo.coordinates"), __quesma_geo_lat( - "geo.coordinates"), 2) AS "aggr__2__key_0", count(*) AS "aggr__2__count" - FROM __quesma_table_name - GROUP BY geohashEncode(__quesma_geo_lon("geo.coordinates"), __quesma_geo_lat( - "geo.coordinates"), 2) AS "aggr__2__key_0" - ORDER BY "aggr__2__count" DESC, "aggr__2__key_0" ASC - LIMIT 10000`, + ExpectedPancakeSQL: "SELECT geohashEncode(__quesma_geo_lon(`geo.coordinates`), __quesma_geo_lat(\n" + + " `geo.coordinates`), 2) AS `aggr__2__key_0`, count(*) AS `aggr__2__count`\n" + + "FROM 
`__quesma_table_name`\n" + + "GROUP BY geohashEncode(__quesma_geo_lon(`geo.coordinates`), __quesma_geo_lat(\n" + + " `geo.coordinates`), 2) AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__count` DESC, `aggr__2__key_0` ASC\n" + + "LIMIT 10000", }, } diff --git a/platform/testdata/kibana-visualize/aggregation_requests.go b/platform/testdata/kibana-visualize/aggregation_requests.go index b58d2994c..70d982149 100644 --- a/platform/testdata/kibana-visualize/aggregation_requests.go +++ b/platform/testdata/kibana-visualize/aggregation_requests.go @@ -231,34 +231,32 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__0__1__count", 1), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__0__key_0", "aggr__0__count", "aggr__0__1__parent_count", - "aggr__0__1__key_0", "aggr__0__1__key_1", "aggr__0__1__count" - FROM ( - SELECT "aggr__0__key_0", "aggr__0__count", "aggr__0__1__parent_count", - "aggr__0__1__key_0", "aggr__0__1__key_1", "aggr__0__1__count", - dense_rank() OVER (ORDER BY "aggr__0__key_0" ASC) AS "aggr__0__order_1_rank" - , - dense_rank() OVER (PARTITION BY "aggr__0__key_0" ORDER BY - "aggr__0__1__count" DESC, "aggr__0__1__key_0" ASC, "aggr__0__1__key_1" ASC) - AS "aggr__0__1__order_1_rank" - FROM ( - SELECT toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset( - toTimezone("@timestamp", 'Europe/Warsaw'))*1000) / 30000) AS - "aggr__0__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS "aggr__0__count", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS - "aggr__0__1__parent_count", "severity" AS "aggr__0__1__key_0", - "source" AS "aggr__0__1__key_1", count(*) AS "aggr__0__1__count" - FROM __quesma_table_name - WHERE ("@timestamp">=fromUnixTimestamp64Milli(1716811196627) AND - "@timestamp"<=fromUnixTimestamp64Milli(1716812096627)) - GROUP BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset( - toTimezone("@timestamp", 'Europe/Warsaw'))*1000) / 30000) AS - "aggr__0__key_0", "severity" AS 
"aggr__0__1__key_0", - "source" AS "aggr__0__1__key_1")) - WHERE "aggr__0__1__order_1_rank"<=3 - ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__1__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__0__key_0`, `aggr__0__count`, `aggr__0__1__parent_count`,\n" + + " `aggr__0__1__key_0`, `aggr__0__1__key_1`, `aggr__0__1__count`\n" + + "FROM (\n" + + " SELECT `aggr__0__key_0`, `aggr__0__count`, `aggr__0__1__parent_count`,\n" + + " `aggr__0__1__key_0`, `aggr__0__1__key_1`, `aggr__0__1__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__0__key_0` ASC) AS `aggr__0__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__0__key_0` ORDER BY\n" + + " `aggr__0__1__count` DESC, `aggr__0__1__key_0` ASC, `aggr__0__1__key_1` ASC)\n" + + " AS `aggr__0__1__order_1_rank`\n" + + " FROM (\n" + + " SELECT toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(\n" + + " toTimezone(`@timestamp`, 'Europe/Warsaw'))*1000) / 30000) AS\n" + + " `aggr__0__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS `aggr__0__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS\n" + + " `aggr__0__1__parent_count`, `severity` AS `aggr__0__1__key_0`,\n" + + " `source` AS `aggr__0__1__key_1`, count(*) AS `aggr__0__1__count`\n" + + " FROM `__quesma_table_name`\n" + + " WHERE (`@timestamp`>=fromUnixTimestamp64Milli(1716811196627) AND\n" + + " `@timestamp`<=fromUnixTimestamp64Milli(1716812096627))\n" + + " GROUP BY toInt64((toUnixTimestamp64Milli(`@timestamp`)+timeZoneOffset(\n" + + " toTimezone(`@timestamp`, 'Europe/Warsaw'))*1000) / 30000) AS\n" + + " `aggr__0__key_0`, `severity` AS `aggr__0__1__key_0`,\n" + + " `source` AS `aggr__0__1__key_1`))\n" + + "WHERE `aggr__0__1__order_1_rank`<=3\n" + + "ORDER BY `aggr__0__order_1_rank` ASC, `aggr__0__1__order_1_rank` ASC", }, { // [1] TestName: "Multi_terms with simple count. 
Visualize: Bar Vertical: Horizontal Axis: Top values (2 values), Vertical: Count of records, Breakdown: @timestamp", @@ -412,30 +410,29 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__0__1__order_1", int64(1716834510000/30000)), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__key_1", - "aggr__0__count", "aggr__0__1__key_0", "aggr__0__1__count" - FROM ( - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__key_1", - "aggr__0__count", "aggr__0__1__key_0", - "aggr__0__1__count", - dense_rank() OVER (ORDER BY "aggr__0__count" DESC, "aggr__0__key_0" ASC, - "aggr__0__key_1" ASC) AS "aggr__0__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__0__key_0", "aggr__0__key_1" ORDER BY - "aggr__0__1__key_0" ASC) AS "aggr__0__1__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "message" AS "aggr__0__key_0", "host.name" AS "aggr__0__key_1", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0", "aggr__0__key_1") AS - "aggr__0__count", - toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS - "aggr__0__1__key_0", count(*) AS "aggr__0__1__count" - FROM ` + TableName + ` - GROUP BY "message" AS "aggr__0__key_0", "host.name" AS "aggr__0__key_1", - toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS - "aggr__0__1__key_0")) - WHERE "aggr__0__order_1_rank"<=3 - ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__1__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__key_1`,\n" + + " `aggr__0__count`, `aggr__0__1__key_0`, `aggr__0__1__count`\n" + + "FROM (\n" + + " SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__key_1`,\n" + + " `aggr__0__count`, `aggr__0__1__key_0`,\n" + + " `aggr__0__1__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__0__count` DESC, `aggr__0__key_0` ASC,\n" + + " `aggr__0__key_1` ASC) AS `aggr__0__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__0__key_0`, 
`aggr__0__key_1` ORDER BY\n" + + " `aggr__0__1__key_0` ASC) AS `aggr__0__1__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n" + + " `message` AS `aggr__0__key_0`, `host.name` AS `aggr__0__key_1`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`, `aggr__0__key_1`) AS\n" + + " `aggr__0__count`,\n" + + " toInt64(toUnixTimestamp64Milli(`@timestamp`) / 30000) AS\n" + + " `aggr__0__1__key_0`, count(*) AS `aggr__0__1__count`\n" + + " FROM `" + TableName + "`\n" + + " GROUP BY `message` AS `aggr__0__key_0`, `host.name` AS `aggr__0__key_1`,\n" + + " toInt64(toUnixTimestamp64Milli(`@timestamp`) / 30000) AS\n" + + " `aggr__0__1__key_0`))\n" + + "WHERE `aggr__0__order_1_rank`<=3\n" + + "ORDER BY `aggr__0__order_1_rank` ASC, `aggr__0__1__order_1_rank` ASC", }, { //[2], TestName: "Multi_terms with double-nested subaggregations. Visualize: Bar Vertical: Horizontal Axis: Top values (2 values), Vertical: Unique count, Breakdown: @timestamp", @@ -631,34 +628,33 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__0__1__2_col_0", 1), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__key_1", - "aggr__0__count", "metric__0__2_col_0", "aggr__0__1__key_0", - "aggr__0__1__count", "metric__0__1__2_col_0" - FROM ( - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__key_1", - "aggr__0__count", "metric__0__2_col_0", "aggr__0__1__key_0", - "aggr__0__1__count", "metric__0__1__2_col_0", - dense_rank() OVER (ORDER BY "metric__0__2_col_0" DESC, "aggr__0__key_0" ASC, - "aggr__0__key_1" ASC) AS "aggr__0__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__0__key_0", "aggr__0__key_1" ORDER BY - "aggr__0__1__key_0" ASC) AS "aggr__0__1__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "severity" AS "aggr__0__key_0", "source" AS "aggr__0__key_1", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0", "aggr__0__key_1") 
AS - "aggr__0__count", - uniqMerge(uniqState("severity")) OVER (PARTITION BY "aggr__0__key_0", - "aggr__0__key_1") AS "metric__0__2_col_0", - toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS - "aggr__0__1__key_0", count(*) AS "aggr__0__1__count", - uniq("severity") AS "metric__0__1__2_col_0" - FROM __quesma_table_name - GROUP BY "severity" AS "aggr__0__key_0", "source" AS "aggr__0__key_1", - toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS - "aggr__0__1__key_0")) - WHERE "aggr__0__order_1_rank"<=3 - ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__1__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__key_1`,\n" + + " `aggr__0__count`, `metric__0__2_col_0`, `aggr__0__1__key_0`,\n" + + " `aggr__0__1__count`, `metric__0__1__2_col_0`\n" + + "FROM (\n" + + " SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__key_1`,\n" + + " `aggr__0__count`, `metric__0__2_col_0`, `aggr__0__1__key_0`,\n" + + " `aggr__0__1__count`, `metric__0__1__2_col_0`,\n" + + " dense_rank() OVER (ORDER BY `metric__0__2_col_0` DESC, `aggr__0__key_0` ASC,\n" + + " `aggr__0__key_1` ASC) AS `aggr__0__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__0__key_0`, `aggr__0__key_1` ORDER BY\n" + + " `aggr__0__1__key_0` ASC) AS `aggr__0__1__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n" + + " `severity` AS `aggr__0__key_0`, `source` AS `aggr__0__key_1`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`, `aggr__0__key_1`) AS\n" + + " `aggr__0__count`,\n" + + " uniqMerge(uniqState(`severity`)) OVER (PARTITION BY `aggr__0__key_0`,\n" + + " `aggr__0__key_1`) AS `metric__0__2_col_0`,\n" + + " toInt64(toUnixTimestamp64Milli(`@timestamp`) / 30000) AS\n" + + " `aggr__0__1__key_0`, count(*) AS `aggr__0__1__count`,\n" + + " uniq(`severity`) AS `metric__0__1__2_col_0`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY `severity` AS `aggr__0__key_0`, `source` AS 
`aggr__0__key_1`,\n" + + " toInt64(toUnixTimestamp64Milli(`@timestamp`) / 30000) AS\n" + + " `aggr__0__1__key_0`))\n" + + "WHERE `aggr__0__order_1_rank`<=3\n" + + "ORDER BY `aggr__0__order_1_rank` ASC, `aggr__0__1__order_1_rank` ASC", }, { // [3] TestName: "Quite simple multi_terms, but with non-string keys. Visualize: Bar Vertical: Horizontal Axis: Date Histogram, Vertical Axis: Count of records, Breakdown: Top values (2 values)", @@ -831,31 +827,30 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__0__1__order_1", int64(1716838500000/30000)), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__key_1", - "aggr__0__count", "aggr__0__1__key_0", "aggr__0__1__count" - FROM ( - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__key_1", - "aggr__0__count", "aggr__0__1__key_0", - "aggr__0__1__count", - dense_rank() OVER (ORDER BY "aggr__0__count" DESC, "aggr__0__key_0" ASC, - "aggr__0__key_1" ASC) AS "aggr__0__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__0__key_0", "aggr__0__key_1" ORDER BY - "aggr__0__1__key_0" ASC) AS "aggr__0__1__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "Cancelled" AS "aggr__0__key_0", "AvgTicketPrice" AS "aggr__0__key_1", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0", "aggr__0__key_1") AS - "aggr__0__count", - toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS - "aggr__0__1__key_0", count(*) AS "aggr__0__1__count" - FROM ` + TableName + ` - GROUP BY "Cancelled" AS "aggr__0__key_0", - "AvgTicketPrice" AS "aggr__0__key_1", - toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS - "aggr__0__1__key_0")) - WHERE "aggr__0__order_1_rank"<=3 - ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__1__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__key_1`,\n" + + " `aggr__0__count`, `aggr__0__1__key_0`, `aggr__0__1__count`\n" + + "FROM (\n" + + " SELECT 
`aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__key_1`,\n" + + " `aggr__0__count`, `aggr__0__1__key_0`,\n" + + " `aggr__0__1__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__0__count` DESC, `aggr__0__key_0` ASC,\n" + + " `aggr__0__key_1` ASC) AS `aggr__0__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__0__key_0`, `aggr__0__key_1` ORDER BY\n" + + " `aggr__0__1__key_0` ASC) AS `aggr__0__1__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n" + + " `Cancelled` AS `aggr__0__key_0`, `AvgTicketPrice` AS `aggr__0__key_1`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`, `aggr__0__key_1`) AS\n" + + " `aggr__0__count`,\n" + + " toInt64(toUnixTimestamp64Milli(`@timestamp`) / 30000) AS\n" + + " `aggr__0__1__key_0`, count(*) AS `aggr__0__1__count`\n" + + " FROM `" + TableName + "`\n" + + " GROUP BY `Cancelled` AS `aggr__0__key_0`,\n" + + " `AvgTicketPrice` AS `aggr__0__key_1`,\n" + + " toInt64(toUnixTimestamp64Milli(`@timestamp`) / 30000) AS\n" + + " `aggr__0__1__key_0`))\n" + + "WHERE `aggr__0__order_1_rank`<=3\n" + + "ORDER BY `aggr__0__order_1_rank` ASC, `aggr__0__1__order_1_rank` ASC", }, { // [4] TestName: "percentile with subaggregation (so, combinator). 
Visualize, Pie, Slice by: top5 of Cancelled, DistanceKilometers, Metric: 95th Percentile", @@ -1030,32 +1025,31 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__0__1__2_col_0", []float64{9842.6279296875}), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__count", - "metric__0__2_col_0", "aggr__0__1__key_0", "aggr__0__1__count", - "metric__0__1__2_col_0" - FROM ( - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__count", - "metric__0__2_col_0", "aggr__0__1__key_0", "aggr__0__1__count", - "metric__0__1__2_col_0", - dense_rank() OVER (ORDER BY "metric__0__2_col_0" DESC, "aggr__0__key_0" ASC) - AS "aggr__0__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__0__key_0" ORDER BY - "aggr__0__1__key_0" ASC) AS "aggr__0__1__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "Cancelled" AS "aggr__0__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS "aggr__0__count", - quantilesMerge(0.950000)(quantilesState(0.950000)("DistanceKilometers")) - OVER (PARTITION BY "aggr__0__key_0") AS "metric__0__2_col_0", - floor("DistanceKilometers"/5000)*5000 AS "aggr__0__1__key_0", - count(*) AS "aggr__0__1__count", - quantiles(0.950000)("DistanceKilometers") AS "metric__0__1__2_col_0" - FROM __quesma_table_name - GROUP BY "Cancelled" AS "aggr__0__key_0", - floor("DistanceKilometers"/5000)*5000 AS "aggr__0__1__key_0")) - WHERE "aggr__0__order_1_rank"<=6 - ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__1__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__count`,\n" + + " `metric__0__2_col_0`, `aggr__0__1__key_0`, `aggr__0__1__count`,\n" + + " `metric__0__1__2_col_0`\n" + + "FROM (\n" + + " SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__count`,\n" + + " `metric__0__2_col_0`, `aggr__0__1__key_0`, `aggr__0__1__count`,\n" + + " `metric__0__1__2_col_0`,\n" + + " dense_rank() OVER (ORDER 
BY `metric__0__2_col_0` DESC, `aggr__0__key_0` ASC)\n" + + " AS `aggr__0__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__0__key_0` ORDER BY\n" + + " `aggr__0__1__key_0` ASC) AS `aggr__0__1__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n" + + " `Cancelled` AS `aggr__0__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS `aggr__0__count`,\n" + + " quantilesMerge(0.950000)(quantilesState(0.950000)(`DistanceKilometers`))\n" + + " OVER (PARTITION BY `aggr__0__key_0`) AS `metric__0__2_col_0`,\n" + + " floor(`DistanceKilometers`/5000)*5000 AS `aggr__0__1__key_0`,\n" + + " count(*) AS `aggr__0__1__count`,\n" + + " quantiles(0.950000)(`DistanceKilometers`) AS `metric__0__1__2_col_0`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY `Cancelled` AS `aggr__0__key_0`,\n" + + " floor(`DistanceKilometers`/5000)*5000 AS `aggr__0__1__key_0`))\n" + + "WHERE `aggr__0__order_1_rank`<=6\n" + + "ORDER BY `aggr__0__order_1_rank` ASC, `aggr__0__1__order_1_rank` ASC", }, { // [5] TestName: "terms with order by agg1>agg2 (multiple aggregations)", @@ -1177,16 +1171,15 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__0__2-bucket__2-metric_col_0", 19285.5078125), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "AvgTicketPrice" AS "aggr__0__key_0", count(*) AS "aggr__0__count", - countIf("bytes_gauge" IS NOT NULL) AS "aggr__0__2-bucket__count", - maxOrNullIf("DistanceKilometers", "bytes_gauge" IS NOT NULL) AS - "metric__0__2-bucket__2-metric_col_0" - FROM __quesma_table_name - GROUP BY "AvgTicketPrice" AS "aggr__0__key_0" - ORDER BY "metric__0__2-bucket__2-metric_col_0" DESC, "aggr__0__key_0" ASC - LIMIT 3`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n" + + " `AvgTicketPrice` AS `aggr__0__key_0`, count(*) AS `aggr__0__count`,\n" + + " countIf(`bytes_gauge` IS NOT NULL) AS 
`aggr__0__2-bucket__count`,\n" + + " maxOrNullIf(`DistanceKilometers`, `bytes_gauge` IS NOT NULL) AS\n" + + " `metric__0__2-bucket__2-metric_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY `AvgTicketPrice` AS `aggr__0__key_0`\n" + + "ORDER BY `metric__0__2-bucket__2-metric_col_0` DESC, `aggr__0__key_0` ASC\n" + + "LIMIT 3", }, { // [6] TestName: "terms with order by stats, easily reproducible in Kibana Visualize", @@ -1328,22 +1321,21 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__0__1_col_4", 25545.0), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "Carrier" AS "aggr__0__key_0", count(*) AS "aggr__0__count", - count("FlightDelayMin") AS "metric__0__1_col_0", - minOrNull("FlightDelayMin") AS "metric__0__1_col_1", - maxOrNull("FlightDelayMin") AS "metric__0__1_col_2", - avgOrNull("FlightDelayMin") AS "metric__0__1_col_3", - sumOrNull("FlightDelayMin") AS "metric__0__1_col_4" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1725723024239) AND "timestamp"<= - fromUnixTimestamp64Milli(1727019024239)) - GROUP BY "Carrier" AS "aggr__0__key_0" - ORDER BY "metric__0__1_col_1" DESC, "metric__0__1_col_0" DESC, - "metric__0__1_col_3" DESC, "metric__0__1_col_2" ASC, - "metric__0__1_col_4" DESC, "aggr__0__key_0" ASC - LIMIT 4`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n" + + " `Carrier` AS `aggr__0__key_0`, count(*) AS `aggr__0__count`,\n" + + " count(`FlightDelayMin`) AS `metric__0__1_col_0`,\n" + + " minOrNull(`FlightDelayMin`) AS `metric__0__1_col_1`,\n" + + " maxOrNull(`FlightDelayMin`) AS `metric__0__1_col_2`,\n" + + " avgOrNull(`FlightDelayMin`) AS `metric__0__1_col_3`,\n" + + " sumOrNull(`FlightDelayMin`) AS `metric__0__1_col_4`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1725723024239) AND `timestamp`<=\n" + + " fromUnixTimestamp64Milli(1727019024239))\n" + + "GROUP 
BY `Carrier` AS `aggr__0__key_0`\n" + + "ORDER BY `metric__0__1_col_1` DESC, `metric__0__1_col_0` DESC,\n" + + " `metric__0__1_col_3` DESC, `metric__0__1_col_2` ASC,\n" + + " `metric__0__1_col_4` DESC, `aggr__0__key_0` ASC\n" + + "LIMIT 4", }, { // [7] TestName: "terms with order by extended_stats (easily reproducible in Kibana Visualize)", @@ -1491,30 +1483,29 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__0__1_col_9", 98.1100106105745), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "Carrier" AS "aggr__0__key_0", count(*) AS "aggr__0__count", - count("FlightDelayMin") AS "metric__0__1_col_0", - minOrNull("FlightDelayMin") AS "metric__0__1_col_1", - maxOrNull("FlightDelayMin") AS "metric__0__1_col_2", - avgOrNull("FlightDelayMin") AS "metric__0__1_col_3", - sumOrNull("FlightDelayMin") AS "metric__0__1_col_4", - sumOrNull("FlightDelayMin"*"FlightDelayMin") AS "metric__0__1_col_5", - varPop("FlightDelayMin") AS "metric__0__1_col_6", - varSamp("FlightDelayMin") AS "metric__0__1_col_7", - stddevPop("FlightDelayMin") AS "metric__0__1_col_8", - stddevSamp("FlightDelayMin") AS "metric__0__1_col_9" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1725723024239) AND "timestamp"<= - fromUnixTimestamp64Milli(1727019024239)) - GROUP BY "Carrier" AS "aggr__0__key_0" - ORDER BY "metric__0__1_col_1" DESC, "metric__0__1_col_0" DESC, - "metric__0__1_col_3" DESC, "metric__0__1_col_2" ASC, - "metric__0__1_col_4" DESC, "metric__0__1_col_5" DESC, - "metric__0__1_col_6" DESC, "metric__0__1_col_6" DESC, - "metric__0__1_col_7" DESC, "metric__0__1_col_8" DESC, - "metric__0__1_col_8" DESC, "metric__0__1_col_9" DESC, "aggr__0__key_0" ASC - LIMIT 4`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n" + + " `Carrier` AS `aggr__0__key_0`, count(*) AS `aggr__0__count`,\n" + + " count(`FlightDelayMin`) AS `metric__0__1_col_0`,\n" + + " 
minOrNull(`FlightDelayMin`) AS `metric__0__1_col_1`,\n" + + " maxOrNull(`FlightDelayMin`) AS `metric__0__1_col_2`,\n" + + " avgOrNull(`FlightDelayMin`) AS `metric__0__1_col_3`,\n" + + " sumOrNull(`FlightDelayMin`) AS `metric__0__1_col_4`,\n" + + " sumOrNull(`FlightDelayMin`*`FlightDelayMin`) AS `metric__0__1_col_5`,\n" + + " varPop(`FlightDelayMin`) AS `metric__0__1_col_6`,\n" + + " varSamp(`FlightDelayMin`) AS `metric__0__1_col_7`,\n" + + " stddevPop(`FlightDelayMin`) AS `metric__0__1_col_8`,\n" + + " stddevSamp(`FlightDelayMin`) AS `metric__0__1_col_9`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1725723024239) AND `timestamp`<=\n" + + " fromUnixTimestamp64Milli(1727019024239))\n" + + "GROUP BY `Carrier` AS `aggr__0__key_0`\n" + + "ORDER BY `metric__0__1_col_1` DESC, `metric__0__1_col_0` DESC,\n" + + " `metric__0__1_col_3` DESC, `metric__0__1_col_2` ASC,\n" + + " `metric__0__1_col_4` DESC, `metric__0__1_col_5` DESC,\n" + + " `metric__0__1_col_6` DESC, `metric__0__1_col_6` DESC,\n" + + " `metric__0__1_col_7` DESC, `metric__0__1_col_8` DESC,\n" + + " `metric__0__1_col_8` DESC, `metric__0__1_col_9` DESC, `aggr__0__key_0` ASC\n" + + "LIMIT 4", }, { // [8] TestName: "Terms with order by top metrics", @@ -1773,33 +1764,32 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__0__1__count", 1), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__count", - "aggr__0__order_1", "aggr__0__1__key_0", "aggr__0__1__count", - "aggr__0__1__2-bucket__count" - FROM ( - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__count", - "aggr__0__order_1", "aggr__0__1__key_0", "aggr__0__1__count", - "aggr__0__1__2-bucket__count", - dense_rank() OVER (ORDER BY "aggr__0__order_1" DESC, "aggr__0__key_0" ASC) - AS "aggr__0__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__0__key_0" ORDER BY - "aggr__0__1__key_0" ASC) AS "aggr__0__1__order_1_rank" - FROM ( - 
SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "AvgTicketPrice" AS "aggr__0__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS "aggr__0__count", - "top_metrics__0__2-bucket__2-metric_col_0" AS "aggr__0__order_1", - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS "aggr__0__1__key_0", - count(*) AS "aggr__0__1__count", - countIf("bytes_gauge" IS NOT NULL) AS "aggr__0__1__2-bucket__count" - FROM __quesma_table_name - GROUP BY "AvgTicketPrice" AS "aggr__0__key_0", - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS "aggr__0__1__key_0")) - WHERE "aggr__0__order_1_rank"<=13 - ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__1__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__count`,\n" + + " `aggr__0__order_1`, `aggr__0__1__key_0`, `aggr__0__1__count`,\n" + + " `aggr__0__1__2-bucket__count`\n" + + "FROM (\n" + + " SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__count`,\n" + + " `aggr__0__order_1`, `aggr__0__1__key_0`, `aggr__0__1__count`,\n" + + " `aggr__0__1__2-bucket__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__0__order_1` DESC, `aggr__0__key_0` ASC)\n" + + " AS `aggr__0__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__0__key_0` ORDER BY\n" + + " `aggr__0__1__key_0` ASC) AS `aggr__0__1__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n" + + " `AvgTicketPrice` AS `aggr__0__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS `aggr__0__count`,\n" + + " `top_metrics__0__2-bucket__2-metric_col_0` AS `aggr__0__order_1`,\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 43200000) AS `aggr__0__1__key_0`,\n" + + " count(*) AS `aggr__0__1__count`,\n" + + " 
countIf(`bytes_gauge` IS NOT NULL) AS `aggr__0__1__2-bucket__count`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY `AvgTicketPrice` AS `aggr__0__key_0`,\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 43200000) AS `aggr__0__1__key_0`))\n" + + "WHERE `aggr__0__order_1_rank`<=13\n" + + "ORDER BY `aggr__0__order_1_rank` ASC, `aggr__0__1__order_1_rank` ASC", }, { // [9] TestName: "Line, Y-axis: Min, Buckets: Date Range, X-Axis: Terms, Split Chart: Date Histogram", @@ -2061,93 +2051,88 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__2__3__4__1_col_0", 360.0), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__2__count", "aggr__2__3__parent_count", "aggr__2__3__key_0", - "aggr__2__3__count", "metric__2__3__1_col_0", "aggr__2__3__4__key_0", - "aggr__2__3__4__count", "metric__2__3__4__1_col_0" - FROM ( - SELECT "aggr__2__count", "aggr__2__3__parent_count", "aggr__2__3__key_0", - "aggr__2__3__count", "metric__2__3__1_col_0", "aggr__2__3__4__key_0", - "aggr__2__3__4__count", "metric__2__3__4__1_col_0", - dense_rank() OVER (ORDER BY "metric__2__3__1_col_0" DESC, - "aggr__2__3__key_0" ASC) AS "aggr__2__3__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__3__key_0" ORDER BY - "aggr__2__3__4__key_0" ASC) AS "aggr__2__3__4__order_1_rank" - FROM ( - SELECT sum(countIf(("timestamp">=toInt64(toUnixTimestamp(toStartOfWeek( - subDate(now(), INTERVAL 1 week)))) AND "timestamp"=toInt64(toUnixTimestamp(toStartOfWeek(subDate( - now(), INTERVAL 1 week)))) AND "timestamp"=toInt64(toUnixTimestamp(toStartOfWeek(subDate( - now(), INTERVAL 1 week)))) AND "timestamp"=toInt64( - toUnixTimestamp(toStartOfWeek(subDate(now(), INTERVAL 1 week)))) AND - "timestamp"=toInt64(toUnixTimestamp(toStartOfWeek(subDate(now(), - INTERVAL 1 week)))) AND "timestamp"=toInt64(toUnixTimestamp( - toStartOfWeek(subDate(now(), INTERVAL 1 week)))) AND 
"timestamp"=fromUnixTimestamp64Milli(1258014686584) AND "timestamp" - <=fromUnixTimestamp64Milli(1731400286584)) AND ("timestamp">=toInt64( - toUnixTimestamp(toStartOfWeek(subDate(now(), INTERVAL 1 week)))) AND - "timestamp"=toInt64(toUnixTimestamp(subDate(now(), - INTERVAL 1 day))))) OVER () AS "aggr__2__count", - sum(countIf("timestamp">=toInt64(toUnixTimestamp(subDate(now(), INTERVAL 1 - day))))) OVER () AS "aggr__2__3__parent_count", - "DistanceKilometers" AS "aggr__2__3__key_0", - sum(countIf("timestamp">=toInt64(toUnixTimestamp(subDate(now(), INTERVAL 1 - day))))) OVER (PARTITION BY "aggr__2__3__key_0") AS "aggr__2__3__count", - minOrNull(minOrNullIf("FlightDelayMin", "timestamp">=toInt64( - toUnixTimestamp(subDate(now(), INTERVAL 1 day))))) OVER (PARTITION BY - "aggr__2__3__key_0") AS "metric__2__3__1_col_0", - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 2592000000) AS - "aggr__2__3__4__key_0", - countIf("timestamp">=toInt64(toUnixTimestamp(subDate(now(), INTERVAL 1 day - )))) AS "aggr__2__3__4__count", - minOrNullIf("FlightDelayMin", "timestamp">=toInt64(toUnixTimestamp(subDate - (now(), INTERVAL 1 day)))) AS "metric__2__3__4__1_col_0" - FROM __quesma_table_name - WHERE (("timestamp">=fromUnixTimestamp64Milli(1258014686584) AND "timestamp" - <=fromUnixTimestamp64Milli(1731400286584)) AND "timestamp">=toInt64( - toUnixTimestamp(subDate(now(), INTERVAL 1 day)))) - GROUP BY "DistanceKilometers" AS "aggr__2__3__key_0", - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 2592000000) AS - "aggr__2__3__4__key_0")) - WHERE "aggr__2__3__order_1_rank"<=6 - ORDER BY "aggr__2__3__order_1_rank" ASC, "aggr__2__3__4__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__2__count`, `aggr__2__3__parent_count`, `aggr__2__3__key_0`,\n" + + " `aggr__2__3__count`, `metric__2__3__1_col_0`, `aggr__2__3__4__key_0`,\n" + + " `aggr__2__3__4__count`, 
`metric__2__3__4__1_col_0`\n" + + "FROM (\n" + + " SELECT `aggr__2__count`, `aggr__2__3__parent_count`, `aggr__2__3__key_0`,\n" + + " `aggr__2__3__count`, `metric__2__3__1_col_0`, `aggr__2__3__4__key_0`,\n" + + " `aggr__2__3__4__count`, `metric__2__3__4__1_col_0`,\n" + + " dense_rank() OVER (ORDER BY `metric__2__3__1_col_0` DESC,\n" + + " `aggr__2__3__key_0` ASC) AS `aggr__2__3__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__3__key_0` ORDER BY\n" + + " `aggr__2__3__4__key_0` ASC) AS `aggr__2__3__4__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(countIf((`timestamp`>=toInt64(toUnixTimestamp(toStartOfWeek(\n" + + " subDate(now(), INTERVAL 1 week)))) AND `timestamp`=toInt64(toUnixTimestamp(toStartOfWeek(subDate(\n" + + " now(), INTERVAL 1 week)))) AND `timestamp`=toInt64(toUnixTimestamp(toStartOfWeek(subDate(\n" + + " now(), INTERVAL 1 week)))) AND `timestamp`=toInt64(\n" + + " toUnixTimestamp(toStartOfWeek(subDate(now(), INTERVAL 1 week)))) AND\n" + + " `timestamp`=toInt64(toUnixTimestamp(toStartOfWeek(subDate(now(),\n" + + " INTERVAL 1 week)))) AND `timestamp`=toInt64(toUnixTimestamp(\n" + + " toStartOfWeek(subDate(now(), INTERVAL 1 week)))) AND `timestamp`=fromUnixTimestamp64Milli(1258014686584) AND `timestamp`\n" + + " <=fromUnixTimestamp64Milli(1731400286584)) AND (`timestamp`>=toInt64(\n" + + " toUnixTimestamp(toStartOfWeek(subDate(now(), INTERVAL 1 week)))) AND\n" + + " `timestamp`=toInt64(toUnixTimestamp(subDate(now(),\n" + + " INTERVAL 1 day))))) OVER () AS `aggr__2__count`,\n" + + " sum(countIf(`timestamp`>=toInt64(toUnixTimestamp(subDate(now(), INTERVAL 1\n" + + " day))))) OVER () AS `aggr__2__3__parent_count`,\n" + + " `DistanceKilometers` AS `aggr__2__3__key_0`,\n" + + " sum(countIf(`timestamp`>=toInt64(toUnixTimestamp(subDate(now(), INTERVAL 1\n" + + " day))))) OVER (PARTITION BY `aggr__2__3__key_0`) AS `aggr__2__3__count`,\n" + + " minOrNull(minOrNullIf(`FlightDelayMin`, `timestamp`>=toInt64(\n" + + " toUnixTimestamp(subDate(now(), 
INTERVAL 1 day))))) OVER (PARTITION BY\n" + + " `aggr__2__3__key_0`) AS `metric__2__3__1_col_0`,\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 2592000000) AS `aggr__2__3__4__key_0`,\n" + + " countIf(`timestamp`>=toInt64(toUnixTimestamp(subDate(now(), INTERVAL 1 day\n" + + " )))) AS `aggr__2__3__4__count`,\n" + + " minOrNullIf(`FlightDelayMin`, `timestamp`>=toInt64(toUnixTimestamp(subDate\n" + + " (now(), INTERVAL 1 day)))) AS `metric__2__3__4__1_col_0`\n" + + " FROM `__quesma_table_name`\n" + + " WHERE ((`timestamp`>=fromUnixTimestamp64Milli(1258014686584) AND `timestamp`\n" + + " <=fromUnixTimestamp64Milli(1731400286584)) AND `timestamp`>=toInt64(\n" + + " toUnixTimestamp(subDate(now(), INTERVAL 1 day))))\n" + + " GROUP BY `DistanceKilometers` AS `aggr__2__3__key_0`,\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 2592000000) AS `aggr__2__3__4__key_0`))\n" + + "WHERE `aggr__2__3__order_1_rank`<=6\n" + + "ORDER BY `aggr__2__3__order_1_rank` ASC, `aggr__2__3__4__order_1_rank` ASC", }, ExpectedAdditionalPancakeResults: [][]model.QueryResultRow{ { @@ -2212,9 +2197,8 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", 14074), }}, }, - ExpectedPancakeSQL: ` - SELECT count(*) AS "aggr__2__count" - FROM __quesma_table_name`, + ExpectedPancakeSQL: "SELECT count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`", }, { // [11] TestName: "simplest IP Prefix (Kibana 8.13+), ipv4 field, prefix_length=1", @@ -2276,12 +2260,11 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", 6784), }}, }, - ExpectedPancakeSQL: ` - SELECT intDiv("clientip", 2147483648) AS "aggr__2__key_0", - count(*) AS "aggr__2__count" - FROM __quesma_table_name - GROUP BY intDiv("clientip", 2147483648) AS "aggr__2__key_0" - ORDER BY 
"aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT intDiv(`clientip`, 2147483648) AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY intDiv(`clientip`, 2147483648) AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [12] TestName: "simplest IP Prefix (Kibana 8.13+), ipv4 field, prefix_length=10", @@ -2387,12 +2370,11 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", 1), }}, }, - ExpectedPancakeSQL: ` - SELECT intDiv("clientip", 4194304) AS "aggr__2__key_0", - count(*) AS "aggr__2__count" - FROM __quesma_table_name - GROUP BY intDiv("clientip", 4194304) AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT intDiv(`clientip`, 4194304) AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY intDiv(`clientip`, 4194304) AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [13] TestName: "simplest IP Prefix (Kibana 8.13+), ipv4 field, prefix_length=32", @@ -2499,12 +2481,11 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", 1), }}, }, - ExpectedPancakeSQL: ` - SELECT intDiv("clientip", 1) AS "aggr__2__key_0", - count(*) AS "aggr__2__count" - FROM __quesma_table_name - GROUP BY intDiv("clientip", 1) AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT intDiv(`clientip`, 1) AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY intDiv(`clientip`, 1) AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [14] TestName: "simplest IP Prefix (Kibana 8.13+), ipv4 field, keyed=true", @@ -2565,12 +2546,11 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", 6784), }}, }, - ExpectedPancakeSQL: ` - SELECT intDiv("clientip", 8192) AS "aggr__2__key_0", - count(*) AS 
"aggr__2__count" - FROM __quesma_table_name - GROUP BY intDiv("clientip", 8192) AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT intDiv(`clientip`, 8192) AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY intDiv(`clientip`, 8192) AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [15] TestName: "simplest IP Prefix (Kibana 8.13+), ipv4 field, append_prefix_length=true", @@ -2633,12 +2613,11 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", 6784), }}, }, - ExpectedPancakeSQL: ` - SELECT intDiv("clientip", 128) AS "aggr__2__key_0", - count(*) AS "aggr__2__count" - FROM __quesma_table_name - GROUP BY intDiv("clientip", 128) AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT intDiv(`clientip`, 128) AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY intDiv(`clientip`, 128) AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [16] TestName: "simplest IP Prefix (Kibana 8.13+), ipv4 field, keyed=true, append_prefix_length=true", @@ -2700,11 +2679,10 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", 6784), }}, }, - ExpectedPancakeSQL: ` - SELECT intDiv("clientip", 2) AS "aggr__2__key_0", count(*) AS "aggr__2__count" - FROM __quesma_table_name - GROUP BY intDiv("clientip", 2) AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT intDiv(`clientip`, 2) AS `aggr__2__key_0`, count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY intDiv(`clientip`, 2) AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [17] TestName: "IP Prefix with other aggregations", @@ -2903,28 +2881,27 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__2__3__4_col_0", 1831), }}, }, - 
ExpectedPancakeSQL: ` - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "aggr__2__3__key_0", "aggr__2__3__count", "metric__2__3__4_col_0" - FROM ( - SELECT "aggr__2__parent_count", "aggr__2__key_0", "aggr__2__count", - "aggr__2__3__key_0", "aggr__2__3__count", "metric__2__3__4_col_0", - dense_rank() OVER (ORDER BY "aggr__2__count" DESC, "aggr__2__key_0" ASC) AS - "aggr__2__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY - "aggr__2__3__key_0" ASC) AS "aggr__2__3__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__2__parent_count", - "bytes" AS "aggr__2__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - intDiv("clientip", 1073741824) AS "aggr__2__3__key_0", - count(*) AS "aggr__2__3__count", - sumOrNull("bytes") AS "metric__2__3__4_col_0" - FROM __quesma_table_name - GROUP BY "bytes" AS "aggr__2__key_0", - intDiv("clientip", 1073741824) AS "aggr__2__3__key_0")) - WHERE "aggr__2__order_1_rank"<=3 - ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__3__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `aggr__2__3__key_0`, `aggr__2__3__count`, `metric__2__3__4_col_0`\n" + + "FROM (\n" + + " SELECT `aggr__2__parent_count`, `aggr__2__key_0`, `aggr__2__count`,\n" + + " `aggr__2__3__key_0`, `aggr__2__3__count`, `metric__2__3__4_col_0`,\n" + + " dense_rank() OVER (ORDER BY `aggr__2__count` DESC, `aggr__2__key_0` ASC) AS\n" + + " `aggr__2__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0` ORDER BY\n" + + " `aggr__2__3__key_0` ASC) AS `aggr__2__3__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__2__parent_count`,\n" + + " `bytes` AS `aggr__2__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__count`,\n" + + " intDiv(`clientip`, 1073741824) AS `aggr__2__3__key_0`,\n" + + " count(*) AS `aggr__2__3__count`,\n" + + " sumOrNull(`bytes`) AS 
`metric__2__3__4_col_0`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY `bytes` AS `aggr__2__key_0`,\n" + + " intDiv(`clientip`, 1073741824) AS `aggr__2__3__key_0`))\n" + + "WHERE `aggr__2__order_1_rank`<=3\n" + + "ORDER BY `aggr__2__order_1_rank` ASC, `aggr__2__3__order_1_rank` ASC", }, { // [18] TestName: "simplest IP Prefix (Kibana 8.13+), ipv6 field, prefix_length=0", @@ -2972,9 +2949,8 @@ var AggregationTests = []testdata.AggregationTestCase{ ExpectedPancakeResults: []model.QueryResultRow{ {Cols: []model.QueryResultCol{model.NewQueryResultCol("aggr__2__count", 14074)}}, }, - ExpectedPancakeSQL: ` - SELECT count(*) AS "aggr__2__count" - FROM __quesma_table_name`, + ExpectedPancakeSQL: "SELECT count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`", }, { // [19] TestName: "simplest IP Prefix (Kibana 8.13+), ipv6 field, prefix_length=128", @@ -3035,11 +3011,10 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", 2), }}, }, - ExpectedPancakeSQL: ` - SELECT intDiv("clientip", 1) AS "aggr__2__key_0", count(*) AS "aggr__2__count" - FROM __quesma_table_name - GROUP BY intDiv("clientip", 1) AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT intDiv(`clientip`, 1) AS `aggr__2__key_0`, count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY intDiv(`clientip`, 1) AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [20] TestName: "IP Prefix (Kibana 8.13+), ipv6 field, keyed=true and overflow of 1<<(128-prefix_length)", @@ -3090,12 +3065,11 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", 14074), }}, }, - ExpectedPancakeSQL: ` - SELECT intDiv("clientip", 1237940039285380274899124224) AS "aggr__2__key_0", - count(*) AS "aggr__2__count" - FROM __quesma_table_name - GROUP BY intDiv("clientip", 1237940039285380274899124224) AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + 
ExpectedPancakeSQL: "SELECT intDiv(`clientip`, 1237940039285380274899124224) AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY intDiv(`clientip`, 1237940039285380274899124224) AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [21] TestName: "simple IP Prefix (Kibana 8.13+), ipv6 field, non-zero& and non-ipv4 key", @@ -3146,12 +3120,11 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", 14074), }}, }, - ExpectedPancakeSQL: ` - SELECT intDiv("clientip", 8589934592) AS "aggr__2__key_0", - count(*) AS "aggr__2__count" - FROM __quesma_table_name - GROUP BY intDiv("clientip", 8589934592) AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT intDiv(`clientip`, 8589934592) AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY intDiv(`clientip`, 8589934592) AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [22] TestName: "IP Prefix (Kibana 8.13+), ipv6 field, multiple keys and append_prefix_length=true", @@ -3202,12 +3175,11 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", 14074), }}, }, - ExpectedPancakeSQL: ` - SELECT intDiv("clientip", 8589934592) AS "aggr__2__key_0", - count(*) AS "aggr__2__count" - FROM __quesma_table_name - GROUP BY intDiv("clientip", 8589934592) AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT intDiv(`clientip`, 8589934592) AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY intDiv(`clientip`, 8589934592) AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [23] TestName: "IP Prefix (Kibana 8.13+), ipv6 field, multiple keys and append_prefix_length=true", @@ -3269,12 +3241,11 @@ var AggregationTests = []testdata.AggregationTestCase{ 
model.NewQueryResultCol("aggr__2__count", 6784), }}, }, - ExpectedPancakeSQL: ` - SELECT intDiv("clientip", 2147483648) AS "aggr__2__key_0", - count(*) AS "aggr__2__count" - FROM __quesma_table_name - GROUP BY intDiv("clientip", 2147483648) AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT intDiv(`clientip`, 2147483648) AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY intDiv(`clientip`, 2147483648) AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [24] TestName: "Simplest IP range. In Kibana: Add panel > Aggregation Based > Area. Buckets: X-asis: IP Range", @@ -3363,13 +3334,12 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("range_3__aggr__2__count", int64(534)), }}, }, - ExpectedPancakeSQL: ` - SELECT countIf(("clientip">='0.0.0.0' AND "clientip"<'127.255.255.255')) AS - "range_0__aggr__2__count", - countIf("clientip">='128.0.0.0') AS "range_1__aggr__2__count", - countIf("clientip">='128.129.130.131') AS "range_2__aggr__2__count", - countIf("clientip"<'10.0.0.5') AS "range_3__aggr__2__count" - FROM __quesma_table_name`, + ExpectedPancakeSQL: "SELECT countIf((`clientip`>='0.0.0.0' AND `clientip`<'127.255.255.255')) AS\n" + + " `range_0__aggr__2__count`,\n" + + " countIf(`clientip`>='128.0.0.0') AS `range_1__aggr__2__count`,\n" + + " countIf(`clientip`>='128.129.130.131') AS `range_2__aggr__2__count`,\n" + + " countIf(`clientip`<'10.0.0.5') AS `range_3__aggr__2__count`\n" + + "FROM `__quesma_table_name`", }, { // [25] TestName: "IP range, with ranges as CIDR masks. In Kibana: Add panel > Aggregation Based > Area. 
Buckets: X-asis: IP Range", @@ -3447,12 +3417,11 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("range_2__aggr__2__count", int64(3)), }}, }, - ExpectedPancakeSQL: ` - SELECT countIf(("clientip">='255.255.255.252' AND "clientip"<'::1:0:0:0')) AS "range_0__aggr__2__count", - countIf("clientip">='128.129.130.131') AS "range_1__aggr__2__count", - countIf(("clientip">='10.0.7.96' AND "clientip"<'10.0.7.128')) AS - "range_2__aggr__2__count" - FROM __quesma_table_name`, + ExpectedPancakeSQL: "SELECT countIf((`clientip`>='255.255.255.252' AND `clientip`<'::1:0:0:0')) AS `range_0__aggr__2__count`,\n" + + " countIf(`clientip`>='128.129.130.131') AS `range_1__aggr__2__count`,\n" + + " countIf((`clientip`>='10.0.7.96' AND `clientip`<'10.0.7.128')) AS\n" + + " `range_2__aggr__2__count`\n" + + "FROM `__quesma_table_name`", }, { // [26] TestName: "IP range, with ranges as CIDR masks, keyed=true. In Kibana: Add panel > Aggregation Based > Area. Buckets: X-asis: IP Range", @@ -3528,12 +3497,11 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("range_2__aggr__2__count", int64(3)), }}, }, - ExpectedPancakeSQL: ` - SELECT countIf(("clientip">='255.255.255.254' AND "clientip"<'::1:0:0:0')) AS "range_0__aggr__2__count", - countIf("clientip">='128.129.130.131') AS "range_1__aggr__2__count", - countIf(("clientip">='10.0.7.96' AND "clientip"<'10.0.7.128')) AS - "range_2__aggr__2__count" - FROM __quesma_table_name`, + ExpectedPancakeSQL: "SELECT countIf((`clientip`>='255.255.255.254' AND `clientip`<'::1:0:0:0')) AS `range_0__aggr__2__count`,\n" + + " countIf(`clientip`>='128.129.130.131') AS `range_1__aggr__2__count`,\n" + + " countIf((`clientip`>='10.0.7.96' AND `clientip`<'10.0.7.128')) AS\n" + + " `range_2__aggr__2__count`\n" + + "FROM `__quesma_table_name`", }, { // [27] TestName: "IP range ipv6", @@ -3591,13 +3559,12 @@ var AggregationTests = []testdata.AggregationTestCase{ 
model.NewQueryResultCol("range_2__aggr__2__count", int64(999999)), }}, }, - ExpectedPancakeSQL: ` - SELECT countIf("clientip">='1::132:13:21:23:122:22') AS - "range_0__aggr__2__count", - countIf("clientip"<'1::132:13:21:23:122:22') AS "range_1__aggr__2__count", - countIf("clientip"<'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') AS - "range_2__aggr__2__count" - FROM __quesma_table_name`, + ExpectedPancakeSQL: "SELECT countIf(`clientip`>='1::132:13:21:23:122:22') AS\n" + + " `range_0__aggr__2__count`,\n" + + " countIf(`clientip`<'1::132:13:21:23:122:22') AS `range_1__aggr__2__count`,\n" + + " countIf(`clientip`<'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') AS\n" + + " `range_2__aggr__2__count`\n" + + "FROM `__quesma_table_name`", }, { // [28] TestName: "IP range ipv6 with mask", @@ -3646,11 +3613,10 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("range_1__aggr__2__count", int64(0)), }}, }, - ExpectedPancakeSQL: ` - SELECT countIf("clientip"<'4000::') AS "range_0__aggr__2__count", - countIf("clientip">='ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffe') AS - "range_1__aggr__2__count" - FROM __quesma_table_name`, + ExpectedPancakeSQL: "SELECT countIf(`clientip`<'4000::') AS `range_0__aggr__2__count`,\n" + + " countIf(`clientip`>='ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffe') AS\n" + + " `range_1__aggr__2__count`\n" + + "FROM `__quesma_table_name`", }, { // [29] TestName: `Simplest Rate aggregation: only 'unit' present (all possible units), with date_histogram's calendar_interval, @@ -3790,13 +3756,12 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", 5011), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp(toStartOfMonth(toTimezone("timestamp", - 'Europe/Warsaw'))))*1000 AS "aggr__2__key_0", count(*) AS "aggr__2__count" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp(toStartOfMonth(toTimezone("timestamp", - 'Europe/Warsaw'))))*1000 AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" 
ASC`, + ExpectedPancakeSQL: "SELECT toInt64(toUnixTimestamp(toStartOfMonth(toTimezone(`timestamp`,\n" + + " 'Europe/Warsaw'))))*1000 AS `aggr__2__key_0`, count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY toInt64(toUnixTimestamp(toStartOfMonth(toTimezone(`timestamp`,\n" + + " 'Europe/Warsaw'))))*1000 AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [30] TestName: "Rate aggregation: all possible units with date_histogram's fixed_interval ('field' present)", @@ -3906,21 +3871,20 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__2__week_col_0", 1958.13671875), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 30000) AS "aggr__2__key_0", - count(*) AS "aggr__2__count", - sumOrNull("DistanceKilometers") AS "metric__2__day_col_0", - sumOrNull("DistanceKilometers") AS "metric__2__hour_col_0", - sumOrNull("DistanceKilometers") AS "metric__2__minute_col_0", - sumOrNull("DistanceKilometers") AS "metric__2__second_col_0", - sumOrNull("DistanceKilometers") AS "metric__2__week_col_0" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1731584141864) AND "timestamp"<= - fromUnixTimestamp64Milli(1731585041864)) - GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 30000) AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 30000) AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`,\n" + + " sumOrNull(`DistanceKilometers`) AS `metric__2__day_col_0`,\n" + + " sumOrNull(`DistanceKilometers`) AS `metric__2__hour_col_0`,\n" + + " sumOrNull(`DistanceKilometers`) AS `metric__2__minute_col_0`,\n" + + " sumOrNull(`DistanceKilometers`) AS 
`metric__2__second_col_0`,\n" + + " sumOrNull(`DistanceKilometers`) AS `metric__2__week_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1731584141864) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1731585041864))\n" + + "GROUP BY toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 30000) AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [31] TestName: "Rate aggregation: all possible units with date_histogram's calendar_interval ('field' present)", @@ -4067,23 +4031,22 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__2__year_col_0", 5011), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp(toStartOfMonth(toTimezone("timestamp", - 'Europe/Warsaw'))))*1000 AS "aggr__2__key_0", count(*) AS "aggr__2__count", - count("DistanceKilometers") AS "metric__2__day_col_0", - count("DistanceKilometers") AS "metric__2__hour_col_0", - count("DistanceKilometers") AS "metric__2__minute_col_0", - count("DistanceKilometers") AS "metric__2__month_col_0", - count("DistanceKilometers") AS "metric__2__quarter_col_0", - count("DistanceKilometers") AS "metric__2__second_col_0", - sumOrNull("DistanceKilometers") AS "metric__2__week_col_0", - count("DistanceKilometers") AS "metric__2__year_col_0" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1668427553316) AND "timestamp"<= - fromUnixTimestamp64Milli(1731585953316)) - GROUP BY toInt64(toUnixTimestamp(toStartOfMonth(toTimezone("timestamp", - 'Europe/Warsaw'))))*1000 AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64(toUnixTimestamp(toStartOfMonth(toTimezone(`timestamp`,\n" + + " 'Europe/Warsaw'))))*1000 AS `aggr__2__key_0`, count(*) AS `aggr__2__count`,\n" + + " count(`DistanceKilometers`) AS `metric__2__day_col_0`,\n" + + " count(`DistanceKilometers`) AS `metric__2__hour_col_0`,\n" + 
+ " count(`DistanceKilometers`) AS `metric__2__minute_col_0`,\n" + + " count(`DistanceKilometers`) AS `metric__2__month_col_0`,\n" + + " count(`DistanceKilometers`) AS `metric__2__quarter_col_0`,\n" + + " count(`DistanceKilometers`) AS `metric__2__second_col_0`,\n" + + " sumOrNull(`DistanceKilometers`) AS `metric__2__week_col_0`,\n" + + " count(`DistanceKilometers`) AS `metric__2__year_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1668427553316) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1731585953316))\n" + + "GROUP BY toInt64(toUnixTimestamp(toStartOfMonth(toTimezone(`timestamp`,\n" + + " 'Europe/Warsaw'))))*1000 AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [32] TestName: "Rate aggregation with some bigger aggregation tree", @@ -4178,32 +4141,31 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__dh1__dh2__count", 9), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__dh1__key_0", "aggr__dh1__count", "metric__dh1__rate1_col_0", - "aggr__dh1__dh2__key_0", "aggr__dh1__dh2__count" - FROM ( - SELECT "aggr__dh1__key_0", "aggr__dh1__count", "metric__dh1__rate1_col_0", - "aggr__dh1__dh2__key_0", "aggr__dh1__dh2__count", - dense_rank() OVER (ORDER BY "aggr__dh1__key_0" ASC) AS - "aggr__dh1__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__dh1__key_0" ORDER BY - "aggr__dh1__dh2__key_0" ASC) AS "aggr__dh1__dh2__order_1_rank" - FROM ( - SELECT toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset( - toTimezone("timestamp", 'Europe/Warsaw'))*1000) / 86400000) AS - "aggr__dh1__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__dh1__key_0") AS "aggr__dh1__count" - , - sum(count("DistanceKilometers")) OVER (PARTITION BY "aggr__dh1__key_0") AS - "metric__dh1__rate1_col_0", - toInt64(toUnixTimestamp64Milli("timestamp") / 3600000) AS - "aggr__dh1__dh2__key_0", count(*) AS "aggr__dh1__dh2__count" - FROM __quesma_table_name - GROUP BY 
toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset( - toTimezone("timestamp", 'Europe/Warsaw'))*1000) / 86400000) AS - "aggr__dh1__key_0", - toInt64(toUnixTimestamp64Milli("timestamp") / 3600000) AS - "aggr__dh1__dh2__key_0")) - ORDER BY "aggr__dh1__order_1_rank" ASC, "aggr__dh1__dh2__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__dh1__key_0`, `aggr__dh1__count`, `metric__dh1__rate1_col_0`,\n" + + " `aggr__dh1__dh2__key_0`, `aggr__dh1__dh2__count`\n" + + "FROM (\n" + + " SELECT `aggr__dh1__key_0`, `aggr__dh1__count`, `metric__dh1__rate1_col_0`,\n" + + " `aggr__dh1__dh2__key_0`, `aggr__dh1__dh2__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__dh1__key_0` ASC) AS\n" + + " `aggr__dh1__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__dh1__key_0` ORDER BY\n" + + " `aggr__dh1__dh2__key_0` ASC) AS `aggr__dh1__dh2__order_1_rank`\n" + + " FROM (\n" + + " SELECT toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(\n" + + " toTimezone(`timestamp`, 'Europe/Warsaw'))*1000) / 86400000) AS\n" + + " `aggr__dh1__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__dh1__key_0`) AS `aggr__dh1__count`\n" + + " ,\n" + + " sum(count(`DistanceKilometers`)) OVER (PARTITION BY `aggr__dh1__key_0`) AS\n" + + " `metric__dh1__rate1_col_0`,\n" + + " toInt64(toUnixTimestamp64Milli(`timestamp`) / 3600000) AS\n" + + " `aggr__dh1__dh2__key_0`, count(*) AS `aggr__dh1__dh2__count`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(\n" + + " toTimezone(`timestamp`, 'Europe/Warsaw'))*1000) / 86400000) AS\n" + + " `aggr__dh1__key_0`,\n" + + " toInt64(toUnixTimestamp64Milli(`timestamp`) / 3600000) AS\n" + + " `aggr__dh1__dh2__key_0`))\n" + + "ORDER BY `aggr__dh1__order_1_rank` ASC, `aggr__dh1__dh2__order_1_rank` ASC", }, } diff --git a/platform/testdata/kibana-visualize/pipeline_aggregation_requests.go b/platform/testdata/kibana-visualize/pipeline_aggregation_requests.go index 
00863aab6..12c9140c5 100644 --- a/platform/testdata/kibana-visualize/pipeline_aggregation_requests.go +++ b/platform/testdata/kibana-visualize/pipeline_aggregation_requests.go @@ -178,35 +178,33 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__2__1-bucket__1-metric_col_0", time.UnixMilli(1727128681000)), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__2__key_0", "aggr__2__count", "aggr__2__1-bucket__key_0", - "aggr__2__1-bucket__count", "metric__2__1-bucket__1-metric_col_0" - FROM ( - SELECT "aggr__2__key_0", "aggr__2__count", "aggr__2__1-bucket__key_0", - "aggr__2__1-bucket__count", "metric__2__1-bucket__1-metric_col_0", - dense_rank() OVER (ORDER BY "aggr__2__key_0" ASC) AS "aggr__2__order_1_rank" - , - dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY "aggr__2__key_0" - ASC, "aggr__2__1-bucket__key_0" ASC) AS "aggr__2__1-bucket__order_1_rank" - FROM ( - SELECT toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset( - toTimezone("timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS - "aggr__2__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS - "aggr__2__1-bucket__key_0", count(*) AS "aggr__2__1-bucket__count", - maxOrNull("timestamp") AS "metric__2__1-bucket__1-metric_col_0" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1726848963807) AND "timestamp" - <=fromUnixTimestamp64Milli(1728144963807)) - GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset( - toTimezone("timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS - "aggr__2__key_0", - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS - "aggr__2__1-bucket__key_0")) - ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__1-bucket__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT 
`aggr__2__key_0`, `aggr__2__count`, `aggr__2__1-bucket__key_0`,\n" + + " `aggr__2__1-bucket__count`, `metric__2__1-bucket__1-metric_col_0`\n" + + "FROM (\n" + + " SELECT `aggr__2__key_0`, `aggr__2__count`, `aggr__2__1-bucket__key_0`,\n" + + " `aggr__2__1-bucket__count`, `metric__2__1-bucket__1-metric_col_0`,\n" + + " dense_rank() OVER (ORDER BY `aggr__2__key_0` ASC) AS `aggr__2__order_1_rank`\n" + + " ,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0` ORDER BY `aggr__2__key_0`\n" + + " ASC, `aggr__2__1-bucket__key_0` ASC) AS `aggr__2__1-bucket__order_1_rank`\n" + + " FROM (\n" + + " SELECT toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(\n" + + " toTimezone(`timestamp`, 'Europe/Warsaw'))*1000) / 43200000) AS\n" + + " `aggr__2__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 43200000) AS\n" + + " `aggr__2__1-bucket__key_0`, count(*) AS `aggr__2__1-bucket__count`,\n" + + " maxOrNull(`timestamp`) AS `metric__2__1-bucket__1-metric_col_0`\n" + + " FROM `__quesma_table_name`\n" + + " WHERE (`timestamp`>=fromUnixTimestamp64Milli(1726848963807) AND `timestamp`<=fromUnixTimestamp64Milli(1728144963807))\n" + + " GROUP BY toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(\n" + + " toTimezone(`timestamp`, 'Europe/Warsaw'))*1000) / 43200000) AS\n" + + " `aggr__2__key_0`,\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 43200000) AS\n" + + " `aggr__2__1-bucket__key_0`))\n" + + "ORDER BY `aggr__2__order_1_rank` ASC, `aggr__2__1-bucket__order_1_rank` ASC", }, { // [1] TestName: "Reproduce: Visualize -> Vertical Bar: Metrics: Cumulative Sum (Aggregation: Avg), Buckets: Date Histogram", @@ -356,15 +354,14 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ 
model.NewQueryResultCol("metric__2__1-metric_col_0", 7.0), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 60000) AS "aggr__2__key_0", - count(*) AS "aggr__2__count", - avgOrNull("dayOfWeek") AS "metric__2__1-metric_col_0" - FROM __quesma_table_name - GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 60000) AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 60000) AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`,\n" + + " avgOrNull(`dayOfWeek`) AS `metric__2__1-metric_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 60000) AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [2] TestName: "Reproduce: Visualize -> Vertical Bar: Metrics: Cumulative Sum (Aggregation: Cumulative Sum (Aggregation: Count)), Buckets: Date Histogram", @@ -527,33 +524,32 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__2__3-bucket__3-metric_col_0", 165), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__2__key_0", "aggr__2__count", "aggr__2__3-bucket__key_0", - "aggr__2__3-bucket__count", "metric__2__3-bucket__3-metric_col_0" - FROM ( - SELECT "aggr__2__key_0", "aggr__2__count", "aggr__2__3-bucket__key_0", - "aggr__2__3-bucket__count", "metric__2__3-bucket__3-metric_col_0", - dense_rank() OVER (ORDER BY "aggr__2__key_0" ASC) AS "aggr__2__order_1_rank" - , - dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY "aggr__2__key_0" - ASC, "aggr__2__3-bucket__key_0" ASC) AS "aggr__2__3-bucket__order_1_rank" - FROM ( - SELECT 
toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset( - toTimezone("timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS - "aggr__2__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS - "aggr__2__3-bucket__key_0", count(*) AS "aggr__2__3-bucket__count", - uniq("timestamp") AS "metric__2__3-bucket__3-metric_col_0" - FROM __quesma_table_name - GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset( - toTimezone("timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS - "aggr__2__key_0", - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS - "aggr__2__3-bucket__key_0")) - ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__3-bucket__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__2__key_0`, `aggr__2__count`, `aggr__2__3-bucket__key_0`,\n" + + " `aggr__2__3-bucket__count`, `metric__2__3-bucket__3-metric_col_0`\n" + + "FROM (\n" + + " SELECT `aggr__2__key_0`, `aggr__2__count`, `aggr__2__3-bucket__key_0`,\n" + + " `aggr__2__3-bucket__count`, `metric__2__3-bucket__3-metric_col_0`,\n" + + " dense_rank() OVER (ORDER BY `aggr__2__key_0` ASC) AS `aggr__2__order_1_rank`\n" + + " ,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0` ORDER BY `aggr__2__key_0`\n" + + " ASC, `aggr__2__3-bucket__key_0` ASC) AS `aggr__2__3-bucket__order_1_rank`\n" + + " FROM (\n" + + " SELECT toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(\n" + + " toTimezone(`timestamp`, 'Europe/Warsaw'))*1000) / 43200000) AS\n" + + " `aggr__2__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 43200000) AS\n" + + " `aggr__2__3-bucket__key_0`, count(*) AS 
`aggr__2__3-bucket__count`,\n" + + " uniq(`timestamp`) AS `metric__2__3-bucket__3-metric_col_0`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(\n" + + " toTimezone(`timestamp`, 'Europe/Warsaw'))*1000) / 43200000) AS\n" + + " `aggr__2__key_0`,\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 43200000) AS\n" + + " `aggr__2__3-bucket__key_0`))\n" + + "ORDER BY `aggr__2__order_1_rank` ASC, `aggr__2__3-bucket__order_1_rank` ASC", }, { // [3] TestName: "Reproduce: Visualize -> Vertical Bar: Metrics: Cumulative Sum (Aggregation: Count), Buckets: Histogram" + @@ -668,11 +664,10 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", int64(3)), }}, }, - ExpectedPancakeSQL: ` - SELECT floor("DistanceMiles"/10)*10 AS "aggr__2__key_0", - count(*) AS "aggr__2__count" - FROM __quesma_table_name - GROUP BY floor("DistanceMiles"/10)*10 AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT floor(`DistanceMiles`/10)*10 AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY floor(`DistanceMiles`/10)*10 AS `aggr__2__key_0`\n" + + " ORDER BY `aggr__2__key_0` ASC", }, } diff --git a/platform/testdata/kibana_sample_data_ecommerce.go b/platform/testdata/kibana_sample_data_ecommerce.go index 30a55beef..602a19eca 100644 --- a/platform/testdata/kibana_sample_data_ecommerce.go +++ b/platform/testdata/kibana_sample_data_ecommerce.go @@ -104,12 +104,11 @@ var KibanaSampleDataEcommerce = []AggregationTestCase{ model.NewQueryResultCol("metric__minAgg_col_0", 2.0), }}, }, - ExpectedPancakeSQL: ` - SELECT maxOrNull("total_quantity") AS "metric__maxAgg_col_0", - minOrNull("total_quantity") AS "metric__minAgg_col_0" - FROM __quesma_table_name - WHERE ("order_date">=fromUnixTimestamp64Milli(1739980133594) AND 
"order_date"<= - fromUnixTimestamp64Milli(1740584933594))`, + ExpectedPancakeSQL: "SELECT maxOrNull(`total_quantity`) AS `metric__maxAgg_col_0`,\n" + + " minOrNull(`total_quantity`) AS `metric__minAgg_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`order_date`>=fromUnixTimestamp64Milli(1739980133594) AND `order_date`<= \n" + + " fromUnixTimestamp64Milli(1740584933594))", }, { // [1] TestName: "Promotions tracking (request 1/3)", @@ -376,81 +375,74 @@ var KibanaSampleDataEcommerce = []AggregationTestCase{ model.NewQueryResultCol("aggr__1__2__count", int64(1)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__1__count", - toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone( - "order_date", 'Europe/Warsaw'))*1000) / 43200000) AS "aggr__1__2__key_0", - count(*) AS "aggr__1__2__count" - FROM __quesma_table_name - WHERE (("order_date">=fromUnixTimestamp64Milli(1739980133594) AND "order_date"<= - fromUnixTimestamp64Milli(1740584933594)) AND "taxful_total_price" > '250') - GROUP BY toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone - ("order_date", 'Europe/Warsaw'))*1000) / 43200000) AS "aggr__1__2__key_0" - ORDER BY "aggr__1__2__key_0" ASC`, - ExpectedAdditionalPancakeSQLs: []string{` - WITH quesma_top_hits_group_table AS ( - SELECT sum(count(*)) OVER () AS "aggr__1__count", - toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone( - "order_date", 'Europe/Warsaw'))*1000) / 43200000) AS "aggr__1__2__key_0", - count(*) AS "aggr__1__2__count" - FROM __quesma_table_name - WHERE (("order_date">=fromUnixTimestamp64Milli(1739980133594) AND "order_date" - <=fromUnixTimestamp64Milli(1740584933594)) AND "taxful_total_price" > '250') - GROUP BY toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset( - toTimezone("order_date", 'Europe/Warsaw'))*1000) / 43200000) AS - "aggr__1__2__key_0" - ORDER BY "aggr__1__2__key_0" ASC) , - quesma_top_hits_join AS ( - SELECT 
"group_table"."aggr__1__count" AS "aggr__1__count", - "group_table"."aggr__1__2__key_0" AS "aggr__1__2__key_0", - "group_table"."aggr__1__2__count" AS "aggr__1__2__count", - "hit_table"."order_date" AS "top_metrics__1__2__4_col_0", - "hit_table"."order_date" AS "top_metrics__1__2__4_col_1", - ROW_NUMBER() OVER (PARTITION BY "group_table"."aggr__1__2__key_0" ORDER BY - "order_date" ASC) AS "top_hits_rank" - FROM quesma_top_hits_group_table AS "group_table" LEFT OUTER JOIN - __quesma_table_name AS "hit_table" ON ("group_table"."aggr__1__2__key_0"= - toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone( - "order_date", 'Europe/Warsaw'))*1000) / 43200000)) - WHERE (("order_date">=fromUnixTimestamp64Milli(1739980133594) AND "order_date" - <=fromUnixTimestamp64Milli(1740584933594)) AND "taxful_total_price" > '250')) - SELECT "aggr__1__count", "aggr__1__2__key_0", "aggr__1__2__count", - "top_metrics__1__2__4_col_0", "top_metrics__1__2__4_col_1", "top_hits_rank" - FROM "quesma_top_hits_join" - WHERE "top_hits_rank"<=10 - ORDER BY "aggr__1__2__key_0" ASC, "top_hits_rank" ASC`, - ` - WITH quesma_top_hits_group_table AS ( - SELECT sum(count(*)) OVER () AS "aggr__1__count", - toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone( - "order_date", 'Europe/Warsaw'))*1000) / 43200000) AS "aggr__1__2__key_0", - count(*) AS "aggr__1__2__count" - FROM __quesma_table_name - WHERE (("order_date">=fromUnixTimestamp64Milli(1739980133594) AND "order_date" - <=fromUnixTimestamp64Milli(1740584933594)) AND "taxful_total_price" > '250') - GROUP BY toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset( - toTimezone("order_date", 'Europe/Warsaw'))*1000) / 43200000) AS - "aggr__1__2__key_0" - ORDER BY "aggr__1__2__key_0" ASC) , - quesma_top_hits_join AS ( - SELECT "group_table"."aggr__1__count" AS "aggr__1__count", - "group_table"."aggr__1__2__key_0" AS "aggr__1__2__key_0", - "group_table"."aggr__1__2__count" AS "aggr__1__2__count", - 
"hit_table"."taxful_total_price" AS "top_metrics__1__2__5_col_0", - "hit_table"."order_date" AS "top_metrics__1__2__5_col_1", - ROW_NUMBER() OVER (PARTITION BY "group_table"."aggr__1__2__key_0" ORDER BY - "order_date" ASC) AS "top_hits_rank" - FROM quesma_top_hits_group_table AS "group_table" LEFT OUTER JOIN - __quesma_table_name AS "hit_table" ON ("group_table"."aggr__1__2__key_0"= - toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone( - "order_date", 'Europe/Warsaw'))*1000) / 43200000)) - WHERE (("order_date">=fromUnixTimestamp64Milli(1739980133594) AND "order_date" - <=fromUnixTimestamp64Milli(1740584933594)) AND "taxful_total_price" > '250')) - SELECT "aggr__1__count", "aggr__1__2__key_0", "aggr__1__2__count", - "top_metrics__1__2__5_col_0", "top_metrics__1__2__5_col_1", "top_hits_rank" - FROM "quesma_top_hits_join" - WHERE "top_hits_rank"<=10 - ORDER BY "aggr__1__2__key_0" ASC, "top_hits_rank" ASC`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__1__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone(\n" + + " `order_date`, 'Europe/Warsaw'))*1000) / 43200000) AS `aggr__1__2__key_0`,\n" + + " count(*) AS `aggr__1__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE ((`order_date`>=fromUnixTimestamp64Milli(1739980133594) AND `order_date`<=fromUnixTimestamp64Milli(1740584933594)) AND `taxful_total_price` > '250')\n" + + "GROUP BY toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone\n" + + " (`order_date`, 'Europe/Warsaw'))*1000) / 43200000) AS `aggr__1__2__key_0`\n" + + "ORDER BY `aggr__1__2__key_0` ASC", + ExpectedAdditionalPancakeSQLs: []string{ + " WITH quesma_top_hits_group_table AS (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__1__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone(\n" + + " `order_date`, 'Europe/Warsaw'))*1000) / 43200000) AS `aggr__1__2__key_0`,\n" + + " count(*) AS `aggr__1__2__count`\n" + + " FROM 
`__quesma_table_name`\n" + + " WHERE ((`order_date`>=fromUnixTimestamp64Milli(1739980133594) AND `order_date`<=fromUnixTimestamp64Milli(1740584933594)) AND `taxful_total_price` > '250')\n" + + " GROUP BY toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(\n" + + " toTimezone(`order_date`, 'Europe/Warsaw'))*1000) / 43200000) AS\n" + + " `aggr__1__2__key_0`\n" + + " ORDER BY `aggr__1__2__key_0` ASC) ,\n" + + "quesma_top_hits_join AS (\n" + + " SELECT `group_table`.`aggr__1__count` AS `aggr__1__count`,\n" + + " `group_table`.`aggr__1__2__key_0` AS `aggr__1__2__key_0`,\n" + + " `group_table`.`aggr__1__2__count` AS `aggr__1__2__count`,\n" + + " `hit_table`.`order_date` AS `top_metrics__1__2__4_col_0`,\n" + + " `hit_table`.`order_date` AS `top_metrics__1__2__4_col_1`,\n" + + " ROW_NUMBER() OVER (PARTITION BY `group_table`.`aggr__1__2__key_0` ORDER BY\n" + + " `order_date` ASC) AS `top_hits_rank`\n" + + " FROM quesma_top_hits_group_table AS `group_table` LEFT OUTER JOIN\n" + + " `__quesma_table_name` AS `hit_table` ON (`group_table`.`aggr__1__2__key_0`=\n" + + " toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone(\n" + + " `order_date`, 'Europe/Warsaw'))*1000) / 43200000))\n" + + " WHERE ((`order_date`>=fromUnixTimestamp64Milli(1739980133594) AND `order_date`<=fromUnixTimestamp64Milli(1740584933594)) AND `taxful_total_price` > '250'))\n" + + "SELECT `aggr__1__count`, `aggr__1__2__key_0`, `aggr__1__2__count`,\n" + + " `top_metrics__1__2__4_col_0`, `top_metrics__1__2__4_col_1`, `top_hits_rank`\n" + + "FROM `quesma_top_hits_join`\n" + + "WHERE `top_hits_rank`<=10\n" + + "ORDER BY `aggr__1__2__key_0` ASC, `top_hits_rank` ASC", + " WITH quesma_top_hits_group_table AS (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__1__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone(\n" + + " `order_date`, 'Europe/Warsaw'))*1000) / 43200000) AS `aggr__1__2__key_0`,\n" + + " count(*) AS `aggr__1__2__count`\n" + + " FROM 
`__quesma_table_name`\n" + + " WHERE ((`order_date`>=fromUnixTimestamp64Milli(1739980133594) AND `order_date`<=fromUnixTimestamp64Milli(1740584933594)) AND `taxful_total_price` > '250')\n" + + " GROUP BY toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(\n" + + " toTimezone(`order_date`, 'Europe/Warsaw'))*1000) / 43200000) AS\n" + + " `aggr__1__2__key_0`\n" + + " ORDER BY `aggr__1__2__key_0` ASC) ,\n" + + "quesma_top_hits_join AS (\n" + + " SELECT `group_table`.`aggr__1__count` AS `aggr__1__count`,\n" + + " `group_table`.`aggr__1__2__key_0` AS `aggr__1__2__key_0`,\n" + + " `group_table`.`aggr__1__2__count` AS `aggr__1__2__count`,\n" + + " `hit_table`.`taxful_total_price` AS `top_metrics__1__2__5_col_0`,\n" + + " `hit_table`.`order_date` AS `top_metrics__1__2__5_col_1`,\n" + + " ROW_NUMBER() OVER (PARTITION BY `group_table`.`aggr__1__2__key_0` ORDER BY\n" + + " `order_date` ASC) AS `top_hits_rank`\n" + + " FROM quesma_top_hits_group_table AS `group_table` LEFT OUTER JOIN\n" + + " `__quesma_table_name` AS `hit_table` ON (`group_table`.`aggr__1__2__key_0`=\n" + + " toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone(\n" + + " `order_date`, 'Europe/Warsaw'))*1000) / 43200000))\n" + + " WHERE ((`order_date`>=fromUnixTimestamp64Milli(1739980133594) AND `order_date`<=fromUnixTimestamp64Milli(1740584933594)) AND `taxful_total_price` > '250'))\n" + + "SELECT `aggr__1__count`, `aggr__1__2__key_0`, `aggr__1__2__count`,\n" + + " `top_metrics__1__2__5_col_0`, `top_metrics__1__2__5_col_1`, `top_hits_rank`\n" + + "FROM `quesma_top_hits_join`\n" + + "WHERE `top_hits_rank`<=10\n" + + "ORDER BY `aggr__1__2__key_0` ASC, `top_hits_rank` ASC", }, ExpectedAdditionalPancakeResults: [][]model.QueryResultRow{ { @@ -683,20 +675,19 @@ var KibanaSampleDataEcommerce = []AggregationTestCase{ model.NewQueryResultCol("metric__0__1-bucket__1-metric_col_0", 0.0), }}, }, - ExpectedPancakeSQL: ` - SELECT 
toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone( - "order_date", 'Europe/Warsaw'))*1000) / 43200000) AS "aggr__0__key_0", - count(*) AS "aggr__0__count", - countIf("products.product_name" __quesma_match '%trouser%') AS - "aggr__0__1-bucket__count", - sumOrNullIf("taxful_total_price", "products.product_name" __quesma_match '%trouser%') - AS "metric__0__1-bucket__1-metric_col_0" - FROM __quesma_table_name - WHERE ("order_date">=fromUnixTimestamp64Milli(1739979776601) AND "order_date"<= - fromUnixTimestamp64Milli(1740584576601)) - GROUP BY toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone - ("order_date", 'Europe/Warsaw'))*1000) / 43200000) AS "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone(\n" + + " `order_date`, 'Europe/Warsaw'))*1000) / 43200000) AS `aggr__0__key_0`,\n" + + " count(*) AS `aggr__0__count`,\n" + + " countIf(`products.product_name` __quesma_match '%trouser%') AS\n" + + " `aggr__0__1-bucket__count`,\n" + + " sumOrNullIf(`taxful_total_price`, `products.product_name` __quesma_match '%trouser%')\n" + + " AS `metric__0__1-bucket__1-metric_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`order_date`>=fromUnixTimestamp64Milli(1739979776601) AND `order_date`<= \n" + + " fromUnixTimestamp64Milli(1740584576601))\n" + + "GROUP BY toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone\n" + + " (`order_date`, 'Europe/Warsaw'))*1000) / 43200000) AS `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__key_0` ASC", }, { // [3] TestName: "Promotions tracking (request 3/3)", @@ -854,22 +845,21 @@ var KibanaSampleDataEcommerce = []AggregationTestCase{ model.NewQueryResultCol("metric__0__1-bucket__1-metric_col_0", 0.0), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone( - "order_date", 'Europe/Warsaw'))*1000) / 43200000) AS "aggr__0__key_0", - 
count(*) AS "aggr__0__count", - countIf(("products.product_name" __quesma_match '%cocktail' OR - "__quesma_fulltext_field_name" __quesma_match 'dress%')) AS - "aggr__0__1-bucket__count", - sumOrNullIf("taxful_total_price", ("products.product_name" __quesma_match '%cocktail' - OR "__quesma_fulltext_field_name" __quesma_match 'dress%')) AS - "metric__0__1-bucket__1-metric_col_0" - FROM __quesma_table_name - WHERE ("order_date">=fromUnixTimestamp64Milli(1740234098238) AND "order_date"<= - fromUnixTimestamp64Milli(1740838898238)) - GROUP BY toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone - ("order_date", 'Europe/Warsaw'))*1000) / 43200000) AS "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone(\n" + + " `order_date`, 'Europe/Warsaw'))*1000) / 43200000) AS `aggr__0__key_0`,\n" + + " count(*) AS `aggr__0__count`,\n" + + " countIf((`products.product_name` __quesma_match '%cocktail' OR\n" + + " `__quesma_fulltext_field_name` __quesma_match 'dress%')) AS\n" + + " `aggr__0__1-bucket__count`,\n" + + " sumOrNullIf(`taxful_total_price`, (`products.product_name` __quesma_match '%cocktail'\n" + + " OR `__quesma_fulltext_field_name` __quesma_match 'dress%')) AS\n" + + " `metric__0__1-bucket__1-metric_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`order_date`>=fromUnixTimestamp64Milli(1740234098238) AND `order_date`<= \n" + + " fromUnixTimestamp64Milli(1740838898238))\n" + + "GROUP BY toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone\n" + + " (`order_date`, 'Europe/Warsaw'))*1000) / 43200000) AS `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__key_0` ASC", }, { // [4] TestName: "Sum of revenue", @@ -962,11 +952,10 @@ var KibanaSampleDataEcommerce = []AggregationTestCase{ model.NewQueryResultCol("metric__0_col_0", 77112.984375), }}, }, - ExpectedPancakeSQL: ` - SELECT sumOrNull("taxful_total_price") AS "metric__0_col_0" - FROM 
__quesma_table_name - WHERE ("order_date">=fromUnixTimestamp64Milli(1739980133594) AND "order_date"<= - fromUnixTimestamp64Milli(1740584933594))`, + ExpectedPancakeSQL: "SELECT sumOrNull(`taxful_total_price`) AS `metric__0_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`order_date`>=fromUnixTimestamp64Milli(1739980133594) AND `order_date`<= \n" + + " fromUnixTimestamp64Milli(1740584933594))", }, { // [5] TestName: "Median spending", @@ -1064,11 +1053,10 @@ var KibanaSampleDataEcommerce = []AggregationTestCase{ model.NewQueryResultCol("metric__0_col_0", []float64{67.0}), }}, }, - ExpectedPancakeSQL: ` - SELECT quantiles(0.500000)("taxful_total_price") AS "metric__0_col_0" - FROM __quesma_table_name - WHERE ("order_date">=fromUnixTimestamp64Milli(1739980133594) AND "order_date"<= - fromUnixTimestamp64Milli(1740584933594))`, + ExpectedPancakeSQL: "SELECT quantiles(0.500000)(`taxful_total_price`) AS `metric__0_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`order_date`>=fromUnixTimestamp64Milli(1739980133594) AND `order_date`<= \n" + + " fromUnixTimestamp64Milli(1740584933594))", }, { // [6] TestName: "Avg. 
items sold", @@ -1161,11 +1149,10 @@ var KibanaSampleDataEcommerce = []AggregationTestCase{ model.NewQueryResultCol("metric__0_col_0", 2.164569215876089), }}, }, - ExpectedPancakeSQL: ` - SELECT avgOrNull("total_quantity") AS "metric__0_col_0" - FROM __quesma_table_name - WHERE ("order_date">=fromUnixTimestamp64Milli(1739980133594) AND "order_date"<= - fromUnixTimestamp64Milli(1740584933594))`, + ExpectedPancakeSQL: "SELECT avgOrNull(`total_quantity`) AS `metric__0_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`order_date`>=fromUnixTimestamp64Milli(1739980133594) AND `order_date`<= \n" + + " fromUnixTimestamp64Milli(1740584933594))", }, { // [7] TestName: "TODO Transactions per day", @@ -1386,25 +1373,23 @@ var KibanaSampleDataEcommerce = []AggregationTestCase{ model.NewQueryResultCol("metric__time_offset_split__0__2_col_0", 218.0), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__time_offset_split__count", - toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone( - "order_date", 'Europe/Warsaw'))*1000) / 86400000) AS - "aggr__time_offset_split__0__key_0", - count(*) AS "aggr__time_offset_split__0__count", - sumOrNull("products.quantity") AS "metric__time_offset_split__0__1_col_0", - sumOrNull("products.quantity") AS "metric__time_offset_split__0__2_col_0" - FROM __quesma_table_name - WHERE ((("order_date">=fromUnixTimestamp64Milli(1740234098238) AND "order_date" - <=fromUnixTimestamp64Milli(1740838898238)) OR ("order_date">= - fromUnixTimestamp64Milli(1739629298238) AND "order_date"<= - fromUnixTimestamp64Milli(1740234098238))) AND ("order_date">= - fromUnixTimestamp64Milli(1740234098238) AND "order_date"<= - fromUnixTimestamp64Milli(1740838898238))) - GROUP BY toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone - ("order_date", 'Europe/Warsaw'))*1000) / 86400000) AS - "aggr__time_offset_split__0__key_0" - ORDER BY "aggr__time_offset_split__0__key_0" ASC`, + ExpectedPancakeSQL: "SELECT 
sum(count(*)) OVER () AS `aggr__time_offset_split__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone(\n" + + " `order_date`, 'Europe/Warsaw'))*1000) / 86400000) AS\n" + + " `aggr__time_offset_split__0__key_0`,\n" + + " count(*) AS `aggr__time_offset_split__0__count`,\n" + + " sumOrNull(`products.quantity`) AS `metric__time_offset_split__0__1_col_0`,\n" + + " sumOrNull(`products.quantity`) AS `metric__time_offset_split__0__2_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (((`order_date`>=fromUnixTimestamp64Milli(1740234098238) AND `order_date`<=fromUnixTimestamp64Milli(1740838898238)) OR (`order_date`>= \n" + + " fromUnixTimestamp64Milli(1739629298238) AND `order_date`<= \n" + + " fromUnixTimestamp64Milli(1740234098238))) AND (`order_date`>= \n" + + " fromUnixTimestamp64Milli(1740234098238) AND `order_date`<= \n" + + " fromUnixTimestamp64Milli(1740838898238)))\n" + + "GROUP BY toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone\n" + + " (`order_date`, 'Europe/Warsaw'))*1000) / 86400000) AS\n" + + " `aggr__time_offset_split__0__key_0`\n" + + "ORDER BY `aggr__time_offset_split__0__key_0` ASC", ExpectedAdditionalPancakeResults: [][]model.QueryResultRow{ { {Cols: []model.QueryResultCol{ @@ -1416,25 +1401,24 @@ var KibanaSampleDataEcommerce = []AggregationTestCase{ }}, }, }, - ExpectedAdditionalPancakeSQLs: []string{` - SELECT sum(count(*)) OVER () AS "aggr__time_offset_split__count", - toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone( - "order_date", 'Europe/Warsaw'))*1000) / 86400000) AS - "aggr__time_offset_split__0__key_0", - count(*) AS "aggr__time_offset_split__0__count", - sumOrNull("products.quantity") AS "metric__time_offset_split__0__1_col_0", - sumOrNull("products.quantity") AS "metric__time_offset_split__0__2_col_0" - FROM __quesma_table_name - WHERE ((("order_date">=fromUnixTimestamp64Milli(1740234098238) AND "order_date" - <=fromUnixTimestamp64Milli(1740838898238)) OR 
("order_date">= - fromUnixTimestamp64Milli(1739629298238) AND "order_date"<= - fromUnixTimestamp64Milli(1740234098238))) AND ("order_date">= - fromUnixTimestamp64Milli(1739629298238) AND "order_date"<= - fromUnixTimestamp64Milli(1740234098238))) - GROUP BY toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone - ("order_date", 'Europe/Warsaw'))*1000) / 86400000) AS - "aggr__time_offset_split__0__key_0" - ORDER BY "aggr__time_offset_split__0__key_0" ASC`}, + ExpectedAdditionalPancakeSQLs: []string{ + "SELECT sum(count(*)) OVER () AS `aggr__time_offset_split__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone(\n" + + " `order_date`, 'Europe/Warsaw'))*1000) / 86400000) AS\n" + + " `aggr__time_offset_split__0__key_0`,\n" + + " count(*) AS `aggr__time_offset_split__0__count`,\n" + + " sumOrNull(`products.quantity`) AS `metric__time_offset_split__0__1_col_0`,\n" + + " sumOrNull(`products.quantity`) AS `metric__time_offset_split__0__2_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (((`order_date`>=fromUnixTimestamp64Milli(1740234098238) AND `order_date`<=fromUnixTimestamp64Milli(1740838898238)) OR (`order_date`>= \n" + + " fromUnixTimestamp64Milli(1739629298238) AND `order_date`<= \n" + + " fromUnixTimestamp64Milli(1740234098238))) AND (`order_date`>= \n" + + " fromUnixTimestamp64Milli(1739629298238) AND `order_date`<= \n" + + " fromUnixTimestamp64Milli(1740234098238)))\n" + + "GROUP BY toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone\n" + + " (`order_date`, 'Europe/Warsaw'))*1000) / 86400000) AS\n" + + " `aggr__time_offset_split__0__key_0`\n" + + "ORDER BY `aggr__time_offset_split__0__key_0` ASC"}, }, { // [8] TestName: "TODO Daily comparison", @@ -1648,25 +1632,23 @@ var KibanaSampleDataEcommerce = []AggregationTestCase{ model.NewQueryResultCol("metric__time_offset_split__0__2_col_0", 4033.34375), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS 
"aggr__time_offset_split__count", - toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone( - "order_date", 'Europe/Warsaw'))*1000) / 86400000) AS - "aggr__time_offset_split__0__key_0", - count(*) AS "aggr__time_offset_split__0__count", - sumOrNull("taxful_total_price") AS "metric__time_offset_split__0__1_col_0", - sumOrNull("taxful_total_price") AS "metric__time_offset_split__0__2_col_0" - FROM __quesma_table_name - WHERE ((("order_date">=fromUnixTimestamp64Milli(1740234098238) AND "order_date" - <=fromUnixTimestamp64Milli(1740838898238)) OR ("order_date">= - fromUnixTimestamp64Milli(1739629298238) AND "order_date"<= - fromUnixTimestamp64Milli(1740234098238))) AND ("order_date">= - fromUnixTimestamp64Milli(1740234098238) AND "order_date"<= - fromUnixTimestamp64Milli(1740838898238))) - GROUP BY toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone - ("order_date", 'Europe/Warsaw'))*1000) / 86400000) AS - "aggr__time_offset_split__0__key_0" - ORDER BY "aggr__time_offset_split__0__key_0" ASC`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__time_offset_split__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone(\n" + + " `order_date`, 'Europe/Warsaw'))*1000) / 86400000) AS\n" + + " `aggr__time_offset_split__0__key_0`,\n" + + " count(*) AS `aggr__time_offset_split__0__count`,\n" + + " sumOrNull(`taxful_total_price`) AS `metric__time_offset_split__0__1_col_0`,\n" + + " sumOrNull(`taxful_total_price`) AS `metric__time_offset_split__0__2_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (((`order_date`>=fromUnixTimestamp64Milli(1740234098238) AND `order_date`<=fromUnixTimestamp64Milli(1740838898238)) OR (`order_date`>= \n" + + " fromUnixTimestamp64Milli(1739629298238) AND `order_date`<= \n" + + " fromUnixTimestamp64Milli(1740234098238))) AND (`order_date`>= \n" + + " fromUnixTimestamp64Milli(1740234098238) AND `order_date`<= \n" + + " fromUnixTimestamp64Milli(1740838898238)))\n" + 
+ "GROUP BY toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone\n" + + " (`order_date`, 'Europe/Warsaw'))*1000) / 86400000) AS\n" + + " `aggr__time_offset_split__0__key_0`\n" + + "ORDER BY `aggr__time_offset_split__0__key_0` ASC", ExpectedAdditionalPancakeResults: [][]model.QueryResultRow{ { {Cols: []model.QueryResultCol{ @@ -1685,26 +1667,24 @@ var KibanaSampleDataEcommerce = []AggregationTestCase{ }}, }, }, - ExpectedAdditionalPancakeSQLs: []string{` - SELECT sum(count(*)) OVER () AS "aggr__time_offset_split__count", - toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone( - "order_date", 'Europe/Warsaw'))*1000) / 86400000) AS - "aggr__time_offset_split__0__key_0", - count(*) AS "aggr__time_offset_split__0__count", - sumOrNull("taxful_total_price") AS "metric__time_offset_split__0__1_col_0", - sumOrNull("taxful_total_price") AS "metric__time_offset_split__0__2_col_0" - FROM __quesma_table_name - WHERE ((("order_date">=fromUnixTimestamp64Milli(1740234098238) AND "order_date" - <=fromUnixTimestamp64Milli(1740838898238)) OR ("order_date">= - fromUnixTimestamp64Milli(1739629298238) AND "order_date"<= - fromUnixTimestamp64Milli(1740234098238))) AND ("order_date">= - fromUnixTimestamp64Milli(1739629298238) AND "order_date"<= - fromUnixTimestamp64Milli(1740234098238))) - GROUP BY toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone - ("order_date", 'Europe/Warsaw'))*1000) / 86400000) AS - "aggr__time_offset_split__0__key_0" - ORDER BY "aggr__time_offset_split__0__key_0" ASC`, - }, + ExpectedAdditionalPancakeSQLs: []string{ + "SELECT sum(count(*)) OVER () AS `aggr__time_offset_split__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone(\n" + + " `order_date`, 'Europe/Warsaw'))*1000) / 86400000) AS\n" + + " `aggr__time_offset_split__0__key_0`,\n" + + " count(*) AS `aggr__time_offset_split__0__count`,\n" + + " sumOrNull(`taxful_total_price`) AS 
`metric__time_offset_split__0__1_col_0`,\n" + + " sumOrNull(`taxful_total_price`) AS `metric__time_offset_split__0__2_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (((`order_date`>=fromUnixTimestamp64Milli(1740234098238) AND `order_date`<=fromUnixTimestamp64Milli(1740838898238)) OR (`order_date`>= \n" + + " fromUnixTimestamp64Milli(1739629298238) AND `order_date`<= \n" + + " fromUnixTimestamp64Milli(1740234098238))) AND (`order_date`>= \n" + + " fromUnixTimestamp64Milli(1739629298238) AND `order_date`<= \n" + + " fromUnixTimestamp64Milli(1740234098238)))\n" + + "GROUP BY toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone\n" + + " (`order_date`, 'Europe/Warsaw'))*1000) / 86400000) AS\n" + + " `aggr__time_offset_split__0__key_0`\n" + + "ORDER BY `aggr__time_offset_split__0__key_0` ASC"}, }, { // [9] TestName: "TODO Top products this/last week", @@ -2039,31 +2019,30 @@ var KibanaSampleDataEcommerce = []AggregationTestCase{ model.NewQueryResultCol("aggr__0__1__count", int64(52)), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__count", - "aggr__0__1__key_0", "aggr__0__1__count" - FROM ( - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__count", - "aggr__0__1__key_0", "aggr__0__1__count", - dense_rank() OVER (ORDER BY "aggr__0__count" DESC, "aggr__0__key_0" ASC) AS - "aggr__0__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__0__key_0" ORDER BY - "aggr__0__1__key_0" ASC) AS "aggr__0__1__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "category" AS "aggr__0__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS "aggr__0__count", - toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone( - "order_date", 'Europe/Warsaw'))*1000) / 86400000) AS "aggr__0__1__key_0", - count(*) AS "aggr__0__1__count" - FROM __quesma_table_name - WHERE ("order_date">=fromUnixTimestamp64Milli(1740234098238) AND - 
"order_date"<=fromUnixTimestamp64Milli(1740838898238)) - GROUP BY "category" AS "aggr__0__key_0", - toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone( - "order_date", 'Europe/Warsaw'))*1000) / 86400000) AS "aggr__0__1__key_0")) - WHERE "aggr__0__order_1_rank"<=11 - ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__1__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__count`,\n" + + " `aggr__0__1__key_0`, `aggr__0__1__count`\n" + + "FROM (\n" + + " SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__count`,\n" + + " `aggr__0__1__key_0`, `aggr__0__1__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__0__count` DESC, `aggr__0__key_0` ASC) AS\n" + + " `aggr__0__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__0__key_0` ORDER BY\n" + + " `aggr__0__1__key_0` ASC) AS `aggr__0__1__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n" + + " `category` AS `aggr__0__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS `aggr__0__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone(\n" + + " `order_date`, 'Europe/Warsaw'))*1000) / 86400000) AS `aggr__0__1__key_0`,\n" + + " count(*) AS `aggr__0__1__count`\n" + + " FROM `__quesma_table_name`\n" + + " WHERE (`order_date`>=fromUnixTimestamp64Milli(1740234098238) AND\n" + + " `order_date`<=fromUnixTimestamp64Milli(1740838898238))\n" + + " GROUP BY `category` AS `aggr__0__key_0`,\n" + + " toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone(\n" + + " `order_date`, 'Europe/Warsaw'))*1000) / 86400000) AS `aggr__0__1__key_0`))\n" + + "WHERE `aggr__0__order_1_rank`<=11\n" + + "ORDER BY `aggr__0__order_1_rank` ASC, `aggr__0__1__order_1_rank` ASC", }, { // [11] TestName: "% of target revenue ($10k)", @@ -2271,17 +2250,16 @@ var KibanaSampleDataEcommerce = []AggregationTestCase{ model.NewQueryResultCol("metric__0__1_col_0", 
7541.5), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone( - "order_date", 'Europe/Warsaw'))*1000) / 86400000) AS "aggr__0__key_0", - count(*) AS "aggr__0__count", - sumOrNull("taxful_total_price") AS "metric__0__1_col_0" - FROM __quesma_table_name - WHERE ("order_date">=fromUnixTimestamp64Milli(1739980133594) AND "order_date"<= - fromUnixTimestamp64Milli(1740584933594)) - GROUP BY toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone - ("order_date", 'Europe/Warsaw'))*1000) / 86400000) AS "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone(\n" + + " `order_date`, 'Europe/Warsaw'))*1000) / 86400000) AS `aggr__0__key_0`,\n" + + " count(*) AS `aggr__0__count`,\n" + + " sumOrNull(`taxful_total_price`) AS `metric__0__1_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`order_date`>=fromUnixTimestamp64Milli(1739980133594) AND `order_date`<= \n" + + " fromUnixTimestamp64Milli(1740584933594))\n" + + "GROUP BY toInt64((toUnixTimestamp64Milli(`order_date`)+timeZoneOffset(toTimezone\n" + + " (`order_date`, 'Europe/Warsaw'))*1000) / 86400000) AS `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__key_0` ASC", }, { // [12] TestName: "Orders by Country (request 1/3)", @@ -2464,24 +2442,23 @@ var KibanaSampleDataEcommerce = []AggregationTestCase{ model.NewQueryResultCol("metric__gridSplit__sum_of_taxful_total_price_col_0", 14978.84375), }}, }, - ExpectedPancakeSQL: ` - SELECT FLOOR(((__quesma_geo_lon("geoip.location")+180)/360)*POWER(2, 5)) AS "aggr__gridSplit__key_0", - FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat("geoip.location")))+(1/COS(RADIANS( - __quesma_geo_lat("geoip.location")))))/PI())/2*POWER(2, 5)) AS "aggr__gridSplit__key_1", - count(*) AS "aggr__gridSplit__count", - avgOrNull(__quesma_geo_lat("geoip_location")) AS "metric__gridSplit__gridCentroid_col_0", - 
avgOrNull(__quesma_geo_lon("geoip_location")) AS "metric__gridSplit__gridCentroid_col_1", - count(*) AS "metric__gridSplit__gridCentroid_col_2", - sumOrNull("taxful_total_price") AS "metric__gridSplit__sum_of_taxful_total_price_col_0" - FROM __quesma_table_name - WHERE ("geoip.location" IS NOT NULL AND ("order_date">=fromUnixTimestamp64Milli( - 1740143222223) AND "order_date"<=fromUnixTimestamp64Milli(1740748022223))) - GROUP BY FLOOR(((__quesma_geo_lon("geoip.location")+180)/360)*POWER(2, 5)) AS "aggr__gridSplit__key_0", - FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat("geoip.location")))+(1/COS(RADIANS( - __quesma_geo_lat("geoip.location")))))/PI())/2*POWER(2, 5)) AS "aggr__gridSplit__key_1" - ORDER BY "aggr__gridSplit__count" DESC, "aggr__gridSplit__key_0" ASC, - "aggr__gridSplit__key_1" ASC - LIMIT 65535`, + ExpectedPancakeSQL: "SELECT FLOOR(((__quesma_geo_lon(`geoip.location`)+180)/360)*POWER(2, 5)) AS `aggr__gridSplit__key_0`,\n" + + " FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat(`geoip.location`)))+(1/COS(RADIANS(\n" + + " __quesma_geo_lat(`geoip.location`)))))/PI())/2*POWER(2, 5)) AS `aggr__gridSplit__key_1`,\n" + + " count(*) AS `aggr__gridSplit__count`,\n" + + " avgOrNull(__quesma_geo_lat(`geoip_location`)) AS `metric__gridSplit__gridCentroid_col_0`,\n" + + " avgOrNull(__quesma_geo_lon(`geoip_location`)) AS `metric__gridSplit__gridCentroid_col_1`,\n" + + " count(*) AS `metric__gridSplit__gridCentroid_col_2`,\n" + + " sumOrNull(`taxful_total_price`) AS `metric__gridSplit__sum_of_taxful_total_price_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`geoip.location` IS NOT NULL AND (`order_date`>=fromUnixTimestamp64Milli(\n" + + " 1740143222223) AND `order_date`<=fromUnixTimestamp64Milli(1740748022223)))\n" + + "GROUP BY FLOOR(((__quesma_geo_lon(`geoip.location`)+180)/360)*POWER(2, 5)) AS `aggr__gridSplit__key_0`,\n" + + " FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat(`geoip.location`)))+(1/COS(RADIANS(\n" + + " 
__quesma_geo_lat(`geoip.location`)))))/PI())/2*POWER(2, 5)) AS `aggr__gridSplit__key_1`\n" + + "ORDER BY `aggr__gridSplit__count` DESC, `aggr__gridSplit__key_0` ASC,\n" + + " `aggr__gridSplit__key_1` ASC\n" + + "LIMIT 65535", }, { // [13] TestName: "Orders by Country (request 2/3)", @@ -2613,15 +2590,14 @@ var KibanaSampleDataEcommerce = []AggregationTestCase{ model.NewQueryResultCol("aggr__join__count", int64(5)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__join__parent_count", - "geoip.region_name" AS "aggr__join__key_0", count(*) AS "aggr__join__count" - FROM __quesma_table_name - WHERE ("order_date">=fromUnixTimestamp64Milli(1740234098238) AND "order_date"<= - fromUnixTimestamp64Milli(1740838898238)) - GROUP BY "geoip.region_name" AS "aggr__join__key_0" - ORDER BY "aggr__join__count" DESC, "aggr__join__key_0" ASC - LIMIT 5`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__join__parent_count`,\n" + + " `geoip.region_name` AS `aggr__join__key_0`, count(*) AS `aggr__join__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`order_date`>=fromUnixTimestamp64Milli(1740234098238) AND `order_date`<= \n" + + " fromUnixTimestamp64Milli(1740838898238))\n" + + "GROUP BY `geoip.region_name` AS `aggr__join__key_0`\n" + + "ORDER BY `aggr__join__count` DESC, `aggr__join__key_0` ASC\n" + + "LIMIT 5", }, { // [14] TestName: "Orders by Country (request 3/3)", @@ -2802,16 +2778,15 @@ var KibanaSampleDataEcommerce = []AggregationTestCase{ model.NewQueryResultCol("aggr__join__count", int64(41)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__join__parent_count", - "geoip.country_iso_code" AS "aggr__join__key_0", - count(*) AS "aggr__join__count" - FROM __quesma_table_name - WHERE ("order_date">=fromUnixTimestamp64Milli(1740234098238) AND "order_date"<= - fromUnixTimestamp64Milli(1740838898238)) - GROUP BY "geoip.country_iso_code" AS "aggr__join__key_0" - ORDER BY "aggr__join__count" DESC, "aggr__join__key_0" ASC 
- LIMIT 65536`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__join__parent_count`,\n" + + " `geoip.country_iso_code` AS `aggr__join__key_0`,\n" + + " count(*) AS `aggr__join__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`order_date`>=fromUnixTimestamp64Milli(1740234098238) AND `order_date`<= \n" + + " fromUnixTimestamp64Milli(1740838898238))\n" + + "GROUP BY `geoip.country_iso_code` AS `aggr__join__key_0`\n" + + "ORDER BY `aggr__join__count` DESC, `aggr__join__key_0` ASC\n" + + "LIMIT 65536", }, { // [15] TestName: "weird", @@ -2915,16 +2890,15 @@ var KibanaSampleDataEcommerce = []AggregationTestCase{ model.NewQueryResultCol("metric__fitToBounds_col_3", 153.11700434423983), }}, }, - ExpectedPancakeSQL: ` - SELECT minOrNull(__quesma_geo_lon("originlocation")) AS - "metric__fitToBounds_col_0", - argMinOrNull(__quesma_geo_lat("originlocation"), __quesma_geo_lon( - "originlocation")) AS "metric__fitToBounds_col_1", - minOrNull(__quesma_geo_lat("originlocation")) AS "metric__fitToBounds_col_2", - argMinOrNull(__quesma_geo_lon("originlocation"), __quesma_geo_lat( - "originlocation")) AS "metric__fitToBounds_col_3" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1740924992069) AND "timestamp"<= - fromUnixTimestamp64Milli(1741529792069))`, + ExpectedPancakeSQL: "SELECT minOrNull(__quesma_geo_lon(`originlocation`)) AS\n" + + " `metric__fitToBounds_col_0`,\n" + + " argMinOrNull(__quesma_geo_lat(`originlocation`), __quesma_geo_lon(\n" + + " `originlocation`)) AS `metric__fitToBounds_col_1`,\n" + + " minOrNull(__quesma_geo_lat(`originlocation`)) AS `metric__fitToBounds_col_2`,\n" + + " argMinOrNull(__quesma_geo_lon(`originlocation`), __quesma_geo_lat(\n" + + " `originlocation`)) AS `metric__fitToBounds_col_3`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1740924992069) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1741529792069))", }, } diff --git 
a/platform/testdata/kibana_sample_data_flights.go b/platform/testdata/kibana_sample_data_flights.go index 845f13921..33fa932c7 100644 --- a/platform/testdata/kibana_sample_data_flights.go +++ b/platform/testdata/kibana_sample_data_flights.go @@ -103,12 +103,11 @@ var KibanaSampleDataFlights = []AggregationTestCase{ model.NewQueryResultCol("metric__minAgg_col_0", 100.14596557617188), }}, }, - ExpectedPancakeSQL: ` - SELECT maxOrNull("AvgTicketPrice") AS "metric__maxAgg_col_0", - minOrNull("AvgTicketPrice") AS "metric__minAgg_col_0" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1740230608853) AND "timestamp"<= - fromUnixTimestamp64Milli(1740835408853))`, + ExpectedPancakeSQL: "SELECT maxOrNull(`AvgTicketPrice`) AS `metric__maxAgg_col_0`,\n" + + " minOrNull(`AvgTicketPrice`) AS `metric__minAgg_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1740230608853) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740835408853))", }, { // [1] TestName: "fill out when panel starts missing - don't know which panel it is", @@ -217,12 +216,11 @@ var KibanaSampleDataFlights = []AggregationTestCase{ model.NewQueryResultCol("metric__minAgg_col_0", 15.0), }}, }, - ExpectedPancakeSQL: ` - SELECT maxOrNull("FlightDelayMin") AS "metric__maxAgg_col_0", - minOrNull("FlightDelayMin") AS "metric__minAgg_col_0" - FROM __quesma_table_name - WHERE (("timestamp">=fromUnixTimestamp64Milli(1740230608853) AND "timestamp"<= - fromUnixTimestamp64Milli(1740835408853)) AND NOT ("FlightDelayMin" __quesma_match 0))`, + ExpectedPancakeSQL: "SELECT maxOrNull(`FlightDelayMin`) AS `metric__maxAgg_col_0`,\n" + + " minOrNull(`FlightDelayMin`) AS `metric__minAgg_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE ((`timestamp`>=fromUnixTimestamp64Milli(1740230608853) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740835408853)) AND NOT (`FlightDelayMin` __quesma_match 0))", }, { // [2] TestName: "Delays & Cancellations 
(request 1/2)", @@ -793,154 +791,150 @@ var KibanaSampleDataFlights = []AggregationTestCase{ }}, }, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__1__count", - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__1__2__key_0", - count(*) AS "aggr__1__2__count" - FROM __quesma_table_name - WHERE (("timestamp">=fromUnixTimestamp64Milli(1740230608853) AND "timestamp"<= - fromUnixTimestamp64Milli(1740835408853)) AND ("FlightDelay" __quesma_match 'true' AND - "Cancelled" __quesma_match 'true')) - GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__1__2__key_0" - ORDER BY "aggr__1__2__key_0" ASC`, - ExpectedAdditionalPancakeSQLs: []string{` - WITH quesma_top_hits_group_table AS ( - SELECT sum(count(*)) OVER () AS "aggr__1__count", - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__1__2__key_0", - count(*) AS "aggr__1__2__count" - FROM __quesma_table_name - WHERE (("timestamp">=fromUnixTimestamp64Milli(1740230608853) AND "timestamp"<= - fromUnixTimestamp64Milli(1740835408853)) AND ("FlightDelay" __quesma_match 'true' - AND "Cancelled" __quesma_match 'true')) - GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset( - toTimezone("timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS - "aggr__1__2__key_0" - ORDER BY "aggr__1__2__key_0" ASC) , - quesma_top_hits_join AS ( - SELECT "group_table"."aggr__1__count" AS "aggr__1__count", - "group_table"."aggr__1__2__key_0" AS "aggr__1__2__key_0", - "group_table"."aggr__1__2__count" AS "aggr__1__2__count", - "hit_table"."timestamp" AS "top_metrics__1__2__4_col_0", - "hit_table"."timestamp" AS "top_metrics__1__2__4_col_1", - ROW_NUMBER() OVER (PARTITION BY "group_table"."aggr__1__2__key_0" ORDER BY - "timestamp" ASC) AS "top_hits_rank" - FROM 
quesma_top_hits_group_table AS "group_table" LEFT OUTER JOIN - __quesma_table_name AS "hit_table" ON ("group_table"."aggr__1__2__key_0"= - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000)) - WHERE (("timestamp">=fromUnixTimestamp64Milli(1740230608853) AND "timestamp"<= - fromUnixTimestamp64Milli(1740835408853)) AND ("FlightDelay" __quesma_match 'true' - AND "Cancelled" __quesma_match 'true'))) - SELECT "aggr__1__count", "aggr__1__2__key_0", "aggr__1__2__count", - "top_metrics__1__2__4_col_0", "top_metrics__1__2__4_col_1", "top_hits_rank" - FROM "quesma_top_hits_join" - WHERE "top_hits_rank"<=10 - ORDER BY "aggr__1__2__key_0" ASC, "top_hits_rank" ASC`, - ` - WITH quesma_top_hits_group_table AS ( - SELECT sum(count(*)) OVER () AS "aggr__1__count", - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__1__2__key_0", - count(*) AS "aggr__1__2__count" - FROM __quesma_table_name - WHERE (("timestamp">=fromUnixTimestamp64Milli(1740230608853) AND "timestamp"<= - fromUnixTimestamp64Milli(1740835408853)) AND ("FlightDelay" __quesma_match 'true' - AND "Cancelled" __quesma_match 'true')) - GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset( - toTimezone("timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS - "aggr__1__2__key_0" - ORDER BY "aggr__1__2__key_0" ASC) , - quesma_top_hits_join AS ( - SELECT "group_table"."aggr__1__count" AS "aggr__1__count", - "group_table"."aggr__1__2__key_0" AS "aggr__1__2__key_0", - "group_table"."aggr__1__2__count" AS "aggr__1__2__count", - "hit_table"."FlightDelay" AS "top_metrics__1__2__5_col_0", - "hit_table"."timestamp" AS "top_metrics__1__2__5_col_1", - ROW_NUMBER() OVER (PARTITION BY "group_table"."aggr__1__2__key_0" ORDER BY - "timestamp" ASC) AS "top_hits_rank" - FROM quesma_top_hits_group_table AS "group_table" LEFT OUTER JOIN - __quesma_table_name AS "hit_table" ON 
("group_table"."aggr__1__2__key_0"= - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000)) - WHERE (("timestamp">=fromUnixTimestamp64Milli(1740230608853) AND "timestamp"<= - fromUnixTimestamp64Milli(1740835408853)) AND ("FlightDelay" __quesma_match 'true' - AND "Cancelled" __quesma_match 'true'))) - SELECT "aggr__1__count", "aggr__1__2__key_0", "aggr__1__2__count", - "top_metrics__1__2__5_col_0", "top_metrics__1__2__5_col_1", "top_hits_rank" - FROM "quesma_top_hits_join" - WHERE "top_hits_rank"<=10 - ORDER BY "aggr__1__2__key_0" ASC, "top_hits_rank" ASC`, - ` - WITH quesma_top_hits_group_table AS ( - SELECT sum(count(*)) OVER () AS "aggr__1__count", - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__1__2__key_0", - count(*) AS "aggr__1__2__count" - FROM __quesma_table_name - WHERE (("timestamp">=fromUnixTimestamp64Milli(1740230608853) AND "timestamp"<= - fromUnixTimestamp64Milli(1740835408853)) AND ("FlightDelay" __quesma_match 'true' - AND "Cancelled" __quesma_match 'true')) - GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset( - toTimezone("timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS - "aggr__1__2__key_0" - ORDER BY "aggr__1__2__key_0" ASC) , - quesma_top_hits_join AS ( - SELECT "group_table"."aggr__1__count" AS "aggr__1__count", - "group_table"."aggr__1__2__key_0" AS "aggr__1__2__key_0", - "group_table"."aggr__1__2__count" AS "aggr__1__2__count", - "hit_table"."Cancelled" AS "top_metrics__1__2__6_col_0", - "hit_table"."timestamp" AS "top_metrics__1__2__6_col_1", - ROW_NUMBER() OVER (PARTITION BY "group_table"."aggr__1__2__key_0" ORDER BY - "timestamp" ASC) AS "top_hits_rank" - FROM quesma_top_hits_group_table AS "group_table" LEFT OUTER JOIN - __quesma_table_name AS "hit_table" ON ("group_table"."aggr__1__2__key_0"= - 
toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000)) - WHERE (("timestamp">=fromUnixTimestamp64Milli(1740230608853) AND "timestamp"<= - fromUnixTimestamp64Milli(1740835408853)) AND ("FlightDelay" __quesma_match 'true' - AND "Cancelled" __quesma_match 'true'))) - SELECT "aggr__1__count", "aggr__1__2__key_0", "aggr__1__2__count", - "top_metrics__1__2__6_col_0", "top_metrics__1__2__6_col_1", "top_hits_rank" - FROM "quesma_top_hits_join" - WHERE "top_hits_rank"<=10 - ORDER BY "aggr__1__2__key_0" ASC, "top_hits_rank" ASC`, - ` - WITH quesma_top_hits_group_table AS ( - SELECT sum(count(*)) OVER () AS "aggr__1__count", - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__1__2__key_0", - count(*) AS "aggr__1__2__count" - FROM __quesma_table_name - WHERE (("timestamp">=fromUnixTimestamp64Milli(1740230608853) AND "timestamp"<= - fromUnixTimestamp64Milli(1740835408853)) AND ("FlightDelay" __quesma_match 'true' - AND "Cancelled" __quesma_match 'true')) - GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset( - toTimezone("timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS - "aggr__1__2__key_0" - ORDER BY "aggr__1__2__key_0" ASC) , - quesma_top_hits_join AS ( - SELECT "group_table"."aggr__1__count" AS "aggr__1__count", - "group_table"."aggr__1__2__key_0" AS "aggr__1__2__key_0", - "group_table"."aggr__1__2__count" AS "aggr__1__2__count", - "hit_table"."Carrier" AS "top_metrics__1__2__7_col_0", - "hit_table"."timestamp" AS "top_metrics__1__2__7_col_1", - ROW_NUMBER() OVER (PARTITION BY "group_table"."aggr__1__2__key_0" ORDER BY - "timestamp" ASC) AS "top_hits_rank" - FROM quesma_top_hits_group_table AS "group_table" LEFT OUTER JOIN - __quesma_table_name AS "hit_table" ON ("group_table"."aggr__1__2__key_0"= - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 
'Europe/Warsaw'))*1000) / 10800000)) - WHERE (("timestamp">=fromUnixTimestamp64Milli(1740230608853) AND "timestamp"<= - fromUnixTimestamp64Milli(1740835408853)) AND ("FlightDelay" __quesma_match 'true' - AND "Cancelled" __quesma_match 'true'))) - SELECT "aggr__1__count", "aggr__1__2__key_0", "aggr__1__2__count", - "top_metrics__1__2__7_col_0", "top_metrics__1__2__7_col_1", "top_hits_rank" - FROM "quesma_top_hits_join" - WHERE "top_hits_rank"<=10 - ORDER BY "aggr__1__2__key_0" ASC, "top_hits_rank" ASC`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__1__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS `aggr__1__2__key_0`,\n" + + " count(*) AS `aggr__1__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE ((`timestamp`>=fromUnixTimestamp64Milli(1740230608853) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740835408853)) AND (`FlightDelay` __quesma_match 'true' AND\n" + + " `Cancelled` __quesma_match 'true'))\n" + + "GROUP BY toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS `aggr__1__2__key_0`\n" + + "ORDER BY `aggr__1__2__key_0` ASC", + ExpectedAdditionalPancakeSQLs: []string{ + " WITH quesma_top_hits_group_table AS (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__1__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS `aggr__1__2__key_0`,\n" + + " count(*) AS `aggr__1__2__count`\n" + + " FROM `__quesma_table_name`\n" + + " WHERE ((`timestamp`>=fromUnixTimestamp64Milli(1740230608853) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740835408853)) AND (`FlightDelay` __quesma_match 'true'\n" + + " AND `Cancelled` __quesma_match 'true'))\n" + + " GROUP BY toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(\n" + + " toTimezone(`timestamp`, 
'Europe/Warsaw'))*1000) / 10800000) AS\n" + + " `aggr__1__2__key_0`\n" + + " ORDER BY `aggr__1__2__key_0` ASC) ,\n" + + "quesma_top_hits_join AS (\n" + + " SELECT `group_table`.`aggr__1__count` AS `aggr__1__count`,\n" + + " `group_table`.`aggr__1__2__key_0` AS `aggr__1__2__key_0`,\n" + + " `group_table`.`aggr__1__2__count` AS `aggr__1__2__count`,\n" + + " `hit_table`.`timestamp` AS `top_metrics__1__2__4_col_0`,\n" + + " `hit_table`.`timestamp` AS `top_metrics__1__2__4_col_1`,\n" + + " ROW_NUMBER() OVER (PARTITION BY `group_table`.`aggr__1__2__key_0` ORDER BY\n" + + " `timestamp` ASC) AS `top_hits_rank`\n" + + " FROM quesma_top_hits_group_table AS `group_table` LEFT OUTER JOIN\n" + + " `__quesma_table_name` AS `hit_table` ON (`group_table`.`aggr__1__2__key_0`=\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000))\n" + + " WHERE ((`timestamp`>=fromUnixTimestamp64Milli(1740230608853) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740835408853)) AND (`FlightDelay` __quesma_match 'true'\n" + + " AND `Cancelled` __quesma_match 'true')))\n" + + "SELECT `aggr__1__count`, `aggr__1__2__key_0`, `aggr__1__2__count`,\n" + + " `top_metrics__1__2__4_col_0`, `top_metrics__1__2__4_col_1`, `top_hits_rank`\n" + + "FROM `quesma_top_hits_join`\n" + + "WHERE `top_hits_rank`<=10\n" + + "ORDER BY `aggr__1__2__key_0` ASC, `top_hits_rank` ASC", + " WITH quesma_top_hits_group_table AS (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__1__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS `aggr__1__2__key_0`,\n" + + " count(*) AS `aggr__1__2__count`\n" + + " FROM `__quesma_table_name`\n" + + " WHERE ((`timestamp`>=fromUnixTimestamp64Milli(1740230608853) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740835408853)) AND (`FlightDelay` __quesma_match 'true'\n" + + " AND `Cancelled` __quesma_match 
'true'))\n" + + " GROUP BY toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(\n" + + " toTimezone(`timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS\n" + + " `aggr__1__2__key_0`\n" + + " ORDER BY `aggr__1__2__key_0` ASC) ,\n" + + "quesma_top_hits_join AS (\n" + + " SELECT `group_table`.`aggr__1__count` AS `aggr__1__count`,\n" + + " `group_table`.`aggr__1__2__key_0` AS `aggr__1__2__key_0`,\n" + + " `group_table`.`aggr__1__2__count` AS `aggr__1__2__count`,\n" + + " `hit_table`.`FlightDelay` AS `top_metrics__1__2__5_col_0`,\n" + + " `hit_table`.`timestamp` AS `top_metrics__1__2__5_col_1`,\n" + + " ROW_NUMBER() OVER (PARTITION BY `group_table`.`aggr__1__2__key_0` ORDER BY\n" + + " `timestamp` ASC) AS `top_hits_rank`\n" + + " FROM quesma_top_hits_group_table AS `group_table` LEFT OUTER JOIN\n" + + " `__quesma_table_name` AS `hit_table` ON (`group_table`.`aggr__1__2__key_0`=\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000))\n" + + " WHERE ((`timestamp`>=fromUnixTimestamp64Milli(1740230608853) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740835408853)) AND (`FlightDelay` __quesma_match 'true'\n" + + " AND `Cancelled` __quesma_match 'true')))\n" + + "SELECT `aggr__1__count`, `aggr__1__2__key_0`, `aggr__1__2__count`,\n" + + " `top_metrics__1__2__5_col_0`, `top_metrics__1__2__5_col_1`, `top_hits_rank`\n" + + "FROM `quesma_top_hits_join`\n" + + "WHERE `top_hits_rank`<=10\n" + + "ORDER BY `aggr__1__2__key_0` ASC, `top_hits_rank` ASC", + " WITH quesma_top_hits_group_table AS (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__1__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS `aggr__1__2__key_0`,\n" + + " count(*) AS `aggr__1__2__count`\n" + + " FROM `__quesma_table_name`\n" + + " WHERE ((`timestamp`>=fromUnixTimestamp64Milli(1740230608853) AND `timestamp`<= \n" + + " 
fromUnixTimestamp64Milli(1740835408853)) AND (`FlightDelay` __quesma_match 'true'\n" + + " AND `Cancelled` __quesma_match 'true'))\n" + + " GROUP BY toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(\n" + + " toTimezone(`timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS\n" + + " `aggr__1__2__key_0`\n" + + " ORDER BY `aggr__1__2__key_0` ASC) ,\n" + + "quesma_top_hits_join AS (\n" + + " SELECT `group_table`.`aggr__1__count` AS `aggr__1__count`,\n" + + " `group_table`.`aggr__1__2__key_0` AS `aggr__1__2__key_0`,\n" + + " `group_table`.`aggr__1__2__count` AS `aggr__1__2__count`,\n" + + " `hit_table`.`Cancelled` AS `top_metrics__1__2__6_col_0`,\n" + + " `hit_table`.`timestamp` AS `top_metrics__1__2__6_col_1`,\n" + + " ROW_NUMBER() OVER (PARTITION BY `group_table`.`aggr__1__2__key_0` ORDER BY\n" + + " `timestamp` ASC) AS `top_hits_rank`\n" + + " FROM quesma_top_hits_group_table AS `group_table` LEFT OUTER JOIN\n" + + " `__quesma_table_name` AS `hit_table` ON (`group_table`.`aggr__1__2__key_0`=\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000))\n" + + " WHERE ((`timestamp`>=fromUnixTimestamp64Milli(1740230608853) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740835408853)) AND (`FlightDelay` __quesma_match 'true'\n" + + " AND `Cancelled` __quesma_match 'true')))\n" + + "SELECT `aggr__1__count`, `aggr__1__2__key_0`, `aggr__1__2__count`,\n" + + " `top_metrics__1__2__6_col_0`, `top_metrics__1__2__6_col_1`, `top_hits_rank`\n" + + "FROM `quesma_top_hits_join`\n" + + "WHERE `top_hits_rank`<=10\n" + + "ORDER BY `aggr__1__2__key_0` ASC, `top_hits_rank` ASC", + " WITH quesma_top_hits_group_table AS (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__1__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS `aggr__1__2__key_0`,\n" + + " count(*) AS `aggr__1__2__count`\n" + + " FROM 
`__quesma_table_name`\n" + + " WHERE ((`timestamp`>=fromUnixTimestamp64Milli(1740230608853) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740835408853)) AND (`FlightDelay` __quesma_match 'true'\n" + + " AND `Cancelled` __quesma_match 'true'))\n" + + " GROUP BY toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(\n" + + " toTimezone(`timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS\n" + + " `aggr__1__2__key_0`\n" + + " ORDER BY `aggr__1__2__key_0` ASC) ,\n" + + "quesma_top_hits_join AS (\n" + + " SELECT `group_table`.`aggr__1__count` AS `aggr__1__count`,\n" + + " `group_table`.`aggr__1__2__key_0` AS `aggr__1__2__key_0`,\n" + + " `group_table`.`aggr__1__2__count` AS `aggr__1__2__count`,\n" + + " `hit_table`.`Carrier` AS `top_metrics__1__2__7_col_0`,\n" + + " `hit_table`.`timestamp` AS `top_metrics__1__2__7_col_1`,\n" + + " ROW_NUMBER() OVER (PARTITION BY `group_table`.`aggr__1__2__key_0` ORDER BY\n" + + " `timestamp` ASC) AS `top_hits_rank`\n" + + " FROM quesma_top_hits_group_table AS `group_table` LEFT OUTER JOIN\n" + + " `__quesma_table_name` AS `hit_table` ON (`group_table`.`aggr__1__2__key_0`=\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000))\n" + + " WHERE ((`timestamp`>=fromUnixTimestamp64Milli(1740230608853) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740835408853)) AND (`FlightDelay` __quesma_match 'true'\n" + + " AND `Cancelled` __quesma_match 'true')))\n" + + "SELECT `aggr__1__count`, `aggr__1__2__key_0`, `aggr__1__2__count`,\n" + + " `top_metrics__1__2__7_col_0`, `top_metrics__1__2__7_col_1`, `top_hits_rank`\n" + + "FROM `quesma_top_hits_join`\n" + + "WHERE `top_hits_rank`<=10\n" + + "ORDER BY `aggr__1__2__key_0` ASC, `top_hits_rank` ASC", }, }, { // [3] @@ -1052,11 +1046,10 @@ var KibanaSampleDataFlights = []AggregationTestCase{ }}, }, // TODO Sprawdz boola - ExpectedPancakeSQL: ` - SELECT countIf("FlightDelay" __quesma_match true) AS 
"metric__0-bucket_col_0" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1740230608853) AND "timestamp"<= - fromUnixTimestamp64Milli(1740835408853))`, + ExpectedPancakeSQL: "SELECT countIf(`FlightDelay` __quesma_match true) AS `metric__0-bucket_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1740230608853) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740835408853))", }, { // [4] TestName: "Cancelled", @@ -1166,11 +1159,10 @@ var KibanaSampleDataFlights = []AggregationTestCase{ model.NewQueryResultCol("metric__0-bucket_col_0", int64(278)), }}, }, - ExpectedPancakeSQL: ` - SELECT countIf("Cancelled" __quesma_match true) AS "metric__0-bucket_col_0" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1740230608853) AND "timestamp"<= - fromUnixTimestamp64Milli(1740835408853))`, + ExpectedPancakeSQL: "SELECT countIf(`Cancelled` __quesma_match true) AS `metric__0-bucket_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1740230608853) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740835408853))", }, { // [5] TestName: "Delayed/Cancelled vs 1 week earlier", @@ -1323,18 +1315,16 @@ var KibanaSampleDataFlights = []AggregationTestCase{ model.NewQueryResultCol("filter_1__aggr__time_offset_split__count", int64(222)), }}, }, - ExpectedPancakeSQL: ` - SELECT countIf(("timestamp">=fromUnixTimestamp64Milli(1740230608853) AND - "timestamp"<=fromUnixTimestamp64Milli(1740835408853))) AS - "filter_0__aggr__time_offset_split__count", - countIf(("timestamp">=fromUnixTimestamp64Milli(1739625808853) AND "timestamp" - <=fromUnixTimestamp64Milli(1740230608853))) AS - "filter_1__aggr__time_offset_split__count" - FROM __quesma_table_name - WHERE ("Cancelled" __quesma_match true AND (("timestamp">=fromUnixTimestamp64Milli( - 1740230608853) AND "timestamp"<=fromUnixTimestamp64Milli(1740835408853)) OR ( - 
"timestamp">=fromUnixTimestamp64Milli(1739625808853) AND "timestamp"<= - fromUnixTimestamp64Milli(1740230608853))))`, + ExpectedPancakeSQL: "SELECT countIf((`timestamp`>=fromUnixTimestamp64Milli(1740230608853) AND\n" + + " `timestamp`<=fromUnixTimestamp64Milli(1740835408853))) AS\n" + + " `filter_0__aggr__time_offset_split__count`,\n" + + " countIf((`timestamp`>=fromUnixTimestamp64Milli(1739625808853) AND `timestamp`<=fromUnixTimestamp64Milli(1740230608853))) AS\n" + + " `filter_1__aggr__time_offset_split__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`Cancelled` __quesma_match true AND ((`timestamp`>=fromUnixTimestamp64Milli(\n" + + " 1740230608853) AND `timestamp`<=fromUnixTimestamp64Milli(1740835408853)) OR (\n" + + " `timestamp`>=fromUnixTimestamp64Milli(1739625808853) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740230608853))))", }, { // [6] TestName: "Flight count", @@ -1448,16 +1438,14 @@ var KibanaSampleDataFlights = []AggregationTestCase{ model.NewQueryResultCol("aggr__0__count", int64(31)), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__0__key_0", - count(*) AS "aggr__0__count" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1740230608853) AND "timestamp"<= - fromUnixTimestamp64Milli(1740835408853)) - GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS `aggr__0__key_0`,\n" + + " count(*) AS `aggr__0__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1740230608853) AND `timestamp`<=fromUnixTimestamp64Milli(1740835408853))\n" + + "GROUP BY 
toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__key_0` ASC", }, { // [7] TestName: "Delays & Cancellations (request 2/2)", @@ -1634,18 +1622,16 @@ var KibanaSampleDataFlights = []AggregationTestCase{ model.NewQueryResultCol("metric__0__1-bucket_col_0", int64(7)), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__0__key_0", - count(*) AS "aggr__0__count", - countIf("FlightDelay" __quesma_match 'true') AS "metric__0__1-bucket_col_0", - countIf("__quesma_fulltext_field_name" __quesma_match '%') AS "metric__0__2-bucket_col_0" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1740230608853) AND "timestamp"<= - fromUnixTimestamp64Milli(1740835408853)) - GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS `aggr__0__key_0`,\n" + + " count(*) AS `aggr__0__count`,\n" + + " countIf(`FlightDelay` __quesma_match 'true') AS `metric__0__1-bucket_col_0`,\n" + + " countIf(`__quesma_fulltext_field_name` __quesma_match '%') AS `metric__0__2-bucket_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1740230608853) AND `timestamp`<=fromUnixTimestamp64Milli(1740835408853))\n" + + "GROUP BY toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__key_0` ASC", }, { // [8] TestName: "Most delayed cities", @@ -1840,17 +1826,15 @@ var KibanaSampleDataFlights = 
[]AggregationTestCase{ model.NewQueryResultCol("metric__0__1-bucket_col_0", int64(2)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "OriginCityName" AS "aggr__0__key_0", count(*) AS "aggr__0__count", - countIf("FlightDelay" __quesma_match true) AS "metric__0__1-bucket_col_0", - countIf("Cancelled" __quesma_match true) AS "metric__0__3-bucket_col_0" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1740230608853) AND "timestamp"<= - fromUnixTimestamp64Milli(1740835408853)) - GROUP BY "OriginCityName" AS "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC - LIMIT 1001`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n" + + " `OriginCityName` AS `aggr__0__key_0`, count(*) AS `aggr__0__count`,\n" + + " countIf(`FlightDelay` __quesma_match true) AS `metric__0__1-bucket_col_0`,\n" + + " countIf(`Cancelled` __quesma_match true) AS `metric__0__3-bucket_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1740230608853) AND `timestamp`<=fromUnixTimestamp64Milli(1740835408853))\n" + + "GROUP BY `OriginCityName` AS `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__key_0` ASC\n" + + "LIMIT 1001", }, { // [9] TestName: "Delay Type", @@ -2027,31 +2011,29 @@ var KibanaSampleDataFlights = []AggregationTestCase{ model.NewQueryResultCol("aggr__0__1__count", int64(1)), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__count", - "aggr__0__1__key_0", "aggr__0__1__count" - FROM ( - SELECT "aggr__0__parent_count", "aggr__0__key_0", "aggr__0__count", - "aggr__0__1__key_0", "aggr__0__1__count", - dense_rank() OVER (ORDER BY "aggr__0__count" DESC, "aggr__0__key_0" ASC) AS - "aggr__0__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__0__key_0" ORDER BY - "aggr__0__1__key_0" ASC) AS "aggr__0__1__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "FlightDelayType" AS 
"aggr__0__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS "aggr__0__count", - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__0__1__key_0", - count(*) AS "aggr__0__1__count" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1740230608853) AND "timestamp" - <=fromUnixTimestamp64Milli(1740835408853)) - GROUP BY "FlightDelayType" AS "aggr__0__key_0", - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__0__1__key_0")) - WHERE "aggr__0__order_1_rank"<=11 - ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__1__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__count`,\n" + + " `aggr__0__1__key_0`, `aggr__0__1__count`\n" + + "FROM (\n" + + " SELECT `aggr__0__parent_count`, `aggr__0__key_0`, `aggr__0__count`,\n" + + " `aggr__0__1__key_0`, `aggr__0__1__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__0__count` DESC, `aggr__0__key_0` ASC) AS\n" + + " `aggr__0__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__0__key_0` ORDER BY\n" + + " `aggr__0__1__key_0` ASC) AS `aggr__0__1__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n" + + " `FlightDelayType` AS `aggr__0__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__0__key_0`) AS `aggr__0__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS `aggr__0__1__key_0`,\n" + + " count(*) AS `aggr__0__1__count`\n" + + " FROM `__quesma_table_name`\n" + + " WHERE (`timestamp`>=fromUnixTimestamp64Milli(1740230608853) AND `timestamp`<=fromUnixTimestamp64Milli(1740835408853))\n" + + " GROUP BY `FlightDelayType` AS `aggr__0__key_0`,\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " 
`timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS `aggr__0__1__key_0`))\n" + + "WHERE `aggr__0__order_1_rank`<=11\n" + + "ORDER BY `aggr__0__order_1_rank` ASC, `aggr__0__1__order_1_rank` ASC", }, { // [10] TestName: "Count of records by DestWeather (bottom right)", @@ -2171,15 +2153,14 @@ var KibanaSampleDataFlights = []AggregationTestCase{ model.NewQueryResultCol("aggr__0__count", int64(373)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "DestWeather" AS "aggr__0__key_0", count(*) AS "aggr__0__count" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1740230608853) AND "timestamp"<= - fromUnixTimestamp64Milli(1740835408853)) - GROUP BY "DestWeather" AS "aggr__0__key_0" - ORDER BY "aggr__0__count" DESC, "aggr__0__key_0" ASC - LIMIT 3`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n" + + " `DestWeather` AS `aggr__0__key_0`, count(*) AS `aggr__0__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1740230608853) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740835408853))\n" + + "GROUP BY `DestWeather` AS `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__count` DESC, `aggr__0__key_0` ASC\n" + + "LIMIT 3", }, { // [11] TestName: "Delay Type", @@ -2305,16 +2286,15 @@ var KibanaSampleDataFlights = []AggregationTestCase{ model.NewQueryResultCol("aggr__0__count", int64(1)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "FlightDelayType" AS "aggr__0__key_0", count(*) AS "aggr__0__count" - FROM __quesma_table_name - WHERE (("timestamp">=fromUnixTimestamp64Milli(1740230608853) AND "timestamp"<= - fromUnixTimestamp64Milli(1740835408853)) AND NOT ("FlightDelayType" - __quesma_match '%No Delay%')) - GROUP BY "FlightDelayType" AS "aggr__0__key_0" - ORDER BY "aggr__0__count" DESC, "aggr__0__key_0" ASC - LIMIT 3`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS 
`aggr__0__parent_count`,\n" + + " `FlightDelayType` AS `aggr__0__key_0`, count(*) AS `aggr__0__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE ((`timestamp`>=fromUnixTimestamp64Milli(1740230608853) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740835408853)) AND NOT (`FlightDelayType`\n" + + " __quesma_match '%No Delay%'))\n" + + "GROUP BY `FlightDelayType` AS `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__count` DESC, `aggr__0__key_0` ASC\n" + + "LIMIT 3", }, { // [12] TestName: "Origin Time Delayed", @@ -2496,25 +2476,24 @@ var KibanaSampleDataFlights = []AggregationTestCase{ model.NewQueryResultCol("metric__gridSplit__sum_of_FlightDelayMin_col_0", 1185.0), }}, }, - ExpectedPancakeSQL: ` - SELECT FLOOR(((__quesma_geo_lon("OriginLocation")+180)/360)*POWER(2, 7)) AS "aggr__gridSplit__key_0", - FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat("OriginLocation")))+(1/COS(RADIANS( - __quesma_geo_lat("OriginLocation")))))/PI())/2*POWER(2, 7)) - AS "aggr__gridSplit__key_1", - count(*) AS "aggr__gridSplit__count", - avgOrNull(__quesma_geo_lat("originlocation")) AS "metric__gridSplit__gridCentroid_col_0", - avgOrNull(__quesma_geo_lon("originlocation")) AS "metric__gridSplit__gridCentroid_col_1", - count(*) AS "metric__gridSplit__gridCentroid_col_2", - sumOrNull("FlightDelayMin") AS "metric__gridSplit__sum_of_FlightDelayMin_col_0" - FROM __quesma_table_name - WHERE ("OriginLocation" IS NOT NULL AND ("timestamp">=fromUnixTimestamp64Milli( - 1740230608853) AND "timestamp"<=fromUnixTimestamp64Milli(1740835408853))) - GROUP BY FLOOR(((__quesma_geo_lon("OriginLocation")+180)/360)*POWER(2, 7)) AS "aggr__gridSplit__key_0", - FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat("OriginLocation")))+(1/COS(RADIANS( - __quesma_geo_lat("OriginLocation")))))/PI())/2*POWER(2, 7)) AS "aggr__gridSplit__key_1" - ORDER BY "aggr__gridSplit__count" DESC, "aggr__gridSplit__key_0" ASC, - "aggr__gridSplit__key_1" ASC - LIMIT 65535`, + ExpectedPancakeSQL: "SELECT 
FLOOR(((__quesma_geo_lon(`OriginLocation`)+180)/360)*POWER(2, 7)) AS `aggr__gridSplit__key_0`,\n" + + " FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat(`OriginLocation`)))+(1/COS(RADIANS(\n" + + " __quesma_geo_lat(`OriginLocation`)))))/PI())/2*POWER(2, 7))\n" + + " AS `aggr__gridSplit__key_1`,\n" + + " count(*) AS `aggr__gridSplit__count`,\n" + + " avgOrNull(__quesma_geo_lat(`originlocation`)) AS `metric__gridSplit__gridCentroid_col_0`,\n" + + " avgOrNull(__quesma_geo_lon(`originlocation`)) AS `metric__gridSplit__gridCentroid_col_1`,\n" + + " count(*) AS `metric__gridSplit__gridCentroid_col_2`,\n" + + " sumOrNull(`FlightDelayMin`) AS `metric__gridSplit__sum_of_FlightDelayMin_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`OriginLocation` IS NOT NULL AND (`timestamp`>=fromUnixTimestamp64Milli(\n" + + " 1740230608853) AND `timestamp`<=fromUnixTimestamp64Milli(1740835408853)))\n" + + "GROUP BY FLOOR(((__quesma_geo_lon(`OriginLocation`)+180)/360)*POWER(2, 7)) AS `aggr__gridSplit__key_0`,\n" + + " FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat(`OriginLocation`)))+(1/COS(RADIANS(\n" + + " __quesma_geo_lat(`OriginLocation`)))))/PI())/2*POWER(2, 7)) AS `aggr__gridSplit__key_1`\n" + + "ORDER BY `aggr__gridSplit__count` DESC, `aggr__gridSplit__key_0` ASC,\n" + + " `aggr__gridSplit__key_1` ASC\n" + + "LIMIT 65535", }, { // [13] TestName: "Delay Buckets", @@ -2630,13 +2609,12 @@ var KibanaSampleDataFlights = []AggregationTestCase{ model.NewQueryResultCol("aggr__0__count", int64(32)), }}, }, - ExpectedPancakeSQL: ` - SELECT "FlightDelayMin" AS "aggr__0__key_0", count(*) AS "aggr__0__count" - FROM __quesma_table_name - WHERE (("timestamp">=fromUnixTimestamp64Milli(1740230608853) AND "timestamp"<= - fromUnixTimestamp64Milli(1740835408853)) AND NOT ("FlightDelayMin" __quesma_match 0)) - GROUP BY "FlightDelayMin" AS "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, + ExpectedPancakeSQL: "SELECT `FlightDelayMin` AS `aggr__0__key_0`, count(*) AS `aggr__0__count`\n" + + "FROM 
`__quesma_table_name`\n" + + "WHERE ((`timestamp`>=fromUnixTimestamp64Milli(1740230608853) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740835408853)) AND NOT (`FlightDelayMin` __quesma_match 0))\n" + + "GROUP BY `FlightDelayMin` AS `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__key_0` ASC", }, { // [14] TestName: "TODO Airport Connections (Hover Over Airport)", @@ -2972,99 +2950,95 @@ var KibanaSampleDataFlights = []AggregationTestCase{ }}, }, }, - ExpectedPancakeSQL: ` - WITH quesma_top_hits_group_table AS ( - SELECT "aggr__origins__parent_count", "aggr__origins__key_0", - "aggr__origins__count", "aggr__origins__distinations__parent_count", - "aggr__origins__distinations__key_0", "aggr__origins__distinations__count", - "aggr__origins__order_1_rank", "aggr__origins__distinations__order_1_rank" - FROM ( - SELECT "aggr__origins__parent_count", "aggr__origins__key_0", - "aggr__origins__count", "aggr__origins__distinations__parent_count", - "aggr__origins__distinations__key_0", - "aggr__origins__distinations__count", - dense_rank() OVER (ORDER BY "aggr__origins__count" DESC, - "aggr__origins__key_0" ASC) AS "aggr__origins__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__origins__key_0" ORDER BY - "aggr__origins__distinations__count" DESC, - "aggr__origins__distinations__key_0" ASC) AS - "aggr__origins__distinations__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__origins__parent_count", - "OriginAirportID" AS "aggr__origins__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__origins__key_0") AS - "aggr__origins__count", - sum(count(*)) OVER (PARTITION BY "aggr__origins__key_0") AS - "aggr__origins__distinations__parent_count", - "DestAirportID" AS "aggr__origins__distinations__key_0", - count(*) AS "aggr__origins__distinations__count" - FROM __quesma_table_name - GROUP BY "OriginAirportID" AS "aggr__origins__key_0", - "DestAirportID" AS "aggr__origins__distinations__key_0")) - WHERE ("aggr__origins__order_1_rank"<=10001 AND - 
"aggr__origins__distinations__order_1_rank"<=10001) - ORDER BY "aggr__origins__order_1_rank" ASC, - "aggr__origins__distinations__order_1_rank" ASC) , - quesma_top_hits_join AS ( - SELECT "group_table"."aggr__origins__parent_count" AS - "aggr__origins__parent_count", - "group_table"."aggr__origins__key_0" AS "aggr__origins__key_0", - "group_table"."aggr__origins__count" AS "aggr__origins__count", - "group_table"."aggr__origins__distinations__parent_count" AS - "aggr__origins__distinations__parent_count", - "group_table"."aggr__origins__distinations__key_0" AS - "aggr__origins__distinations__key_0", - "group_table"."aggr__origins__distinations__count" AS - "aggr__origins__distinations__count", - "hit_table"."DestLocation" AS - "top_hits__origins__distinations__destLocation_col_0", - ROW_NUMBER() OVER (PARTITION BY "group_table"."aggr__origins__key_0", - "group_table"."aggr__origins__distinations__key_0") AS "top_hits_rank", - "group_table"."aggr__origins__order_1_rank" AS "aggr__origins__order_1_rank" - , - "group_table"."aggr__origins__distinations__order_1_rank" AS - "aggr__origins__distinations__order_1_rank" - FROM quesma_top_hits_group_table AS "group_table" LEFT OUTER JOIN - __quesma_table_name AS "hit_table" ON (("group_table"."aggr__origins__key_0" - ="hit_table"."OriginAirportID" AND - "group_table"."aggr__origins__distinations__key_0"= - "hit_table"."DestAirportID"))) - SELECT "aggr__origins__parent_count", "aggr__origins__key_0", - "aggr__origins__count", "aggr__origins__distinations__parent_count", - "aggr__origins__distinations__key_0", "aggr__origins__distinations__count", - "top_hits__origins__distinations__destLocation_col_0", "top_hits_rank" - FROM "quesma_top_hits_join" - WHERE "top_hits_rank"<=1 - ORDER BY "aggr__origins__order_1_rank" ASC, - "aggr__origins__distinations__order_1_rank" ASC, "top_hits_rank" ASC`, - ExpectedAdditionalPancakeSQLs: []string{` - WITH quesma_top_hits_group_table AS ( - SELECT sum(count(*)) OVER () AS 
"aggr__origins__parent_count", - "OriginAirportID" AS "aggr__origins__key_0", - count(*) AS "aggr__origins__count" - FROM __quesma_table_name - GROUP BY "OriginAirportID" AS "aggr__origins__key_0" - ORDER BY "aggr__origins__count" DESC, "aggr__origins__key_0" ASC - LIMIT 10001) , - quesma_top_hits_join AS ( - SELECT "group_table"."aggr__origins__parent_count" AS - "aggr__origins__parent_count", - "group_table"."aggr__origins__key_0" AS "aggr__origins__key_0", - "group_table"."aggr__origins__count" AS "aggr__origins__count", - "hit_table"."OriginLocation" AS "top_hits__origins__originLocation_col_0", - "hit_table"."Origin" AS "top_hits__origins__originLocation_col_1", - ROW_NUMBER() OVER (PARTITION BY "group_table"."aggr__origins__key_0") AS - "top_hits_rank" - FROM quesma_top_hits_group_table AS "group_table" LEFT OUTER JOIN - __quesma_table_name AS "hit_table" ON ("group_table"."aggr__origins__key_0"= - "hit_table"."OriginAirportID")) - SELECT "aggr__origins__parent_count", "aggr__origins__key_0", - "aggr__origins__count", "top_hits__origins__originLocation_col_0", - "top_hits__origins__originLocation_col_1", "top_hits_rank" - FROM "quesma_top_hits_join" - WHERE "top_hits_rank"<=1 - ORDER BY "aggr__origins__count" DESC, "aggr__origins__key_0" ASC, - "top_hits_rank" ASC`, + ExpectedPancakeSQL: "WITH quesma_top_hits_group_table AS (\n" + + " SELECT `aggr__origins__parent_count`, `aggr__origins__key_0`,\n" + + " `aggr__origins__count`, `aggr__origins__distinations__parent_count`,\n" + + " `aggr__origins__distinations__key_0`, `aggr__origins__distinations__count`,\n" + + " `aggr__origins__order_1_rank`, `aggr__origins__distinations__order_1_rank`\n" + + " FROM (\n" + + " SELECT `aggr__origins__parent_count`, `aggr__origins__key_0`,\n" + + " `aggr__origins__count`, `aggr__origins__distinations__parent_count`,\n" + + " `aggr__origins__distinations__key_0`,\n" + + " `aggr__origins__distinations__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__origins__count` DESC,\n" 
+ + " `aggr__origins__key_0` ASC) AS `aggr__origins__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__origins__key_0` ORDER BY\n" + + " `aggr__origins__distinations__count` DESC,\n" + + " `aggr__origins__distinations__key_0` ASC) AS\n" + + " `aggr__origins__distinations__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__origins__parent_count`,\n" + + " `OriginAirportID` AS `aggr__origins__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__origins__key_0`) AS\n" + + " `aggr__origins__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__origins__key_0`) AS\n" + + " `aggr__origins__distinations__parent_count`,\n" + + " `DestAirportID` AS `aggr__origins__distinations__key_0`,\n" + + " count(*) AS `aggr__origins__distinations__count`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY `OriginAirportID` AS `aggr__origins__key_0`,\n" + + " `DestAirportID` AS `aggr__origins__distinations__key_0`))\n" + + " WHERE (`aggr__origins__order_1_rank`<=10001 AND\n" + + " `aggr__origins__distinations__order_1_rank`<=10001)\n" + + " ORDER BY `aggr__origins__order_1_rank` ASC,\n" + + " `aggr__origins__distinations__order_1_rank` ASC) ,\n" + + "quesma_top_hits_join AS (\n" + + " SELECT `group_table`.`aggr__origins__parent_count` AS\n" + + " `aggr__origins__parent_count`,\n" + + " `group_table`.`aggr__origins__key_0` AS `aggr__origins__key_0`,\n" + + " `group_table`.`aggr__origins__count` AS `aggr__origins__count`,\n" + + " `group_table`.`aggr__origins__distinations__parent_count` AS\n" + + " `aggr__origins__distinations__parent_count`,\n" + + " `group_table`.`aggr__origins__distinations__key_0` AS\n" + + " `aggr__origins__distinations__key_0`,\n" + + " `group_table`.`aggr__origins__distinations__count` AS\n" + + " `aggr__origins__distinations__count`,\n" + + " `hit_table`.`DestLocation` AS\n" + + " `top_hits__origins__distinations__destLocation_col_0`,\n" + + " ROW_NUMBER() OVER (PARTITION BY `group_table`.`aggr__origins__key_0`,\n" 
+ + " `group_table`.`aggr__origins__distinations__key_0`) AS `top_hits_rank`,\n" + + " `group_table`.`aggr__origins__order_1_rank` AS `aggr__origins__order_1_rank`\n" + + " ,\n" + + " `group_table`.`aggr__origins__distinations__order_1_rank` AS\n" + + " `aggr__origins__distinations__order_1_rank`\n" + + " FROM quesma_top_hits_group_table AS `group_table` LEFT OUTER JOIN\n" + + " `__quesma_table_name` AS `hit_table` ON ((`group_table`.`aggr__origins__key_0`=`hit_table`.`OriginAirportID` AND\n" + + " `group_table`.`aggr__origins__distinations__key_0`=`hit_table`.`DestAirportID`)))\n" + + "SELECT `aggr__origins__parent_count`, `aggr__origins__key_0`,\n" + + " `aggr__origins__count`, `aggr__origins__distinations__parent_count`,\n" + + " `aggr__origins__distinations__key_0`, `aggr__origins__distinations__count`,\n" + + " `top_hits__origins__distinations__destLocation_col_0`, `top_hits_rank`\n" + + "FROM `quesma_top_hits_join`\n" + + "WHERE `top_hits_rank`<=1\n" + + "ORDER BY `aggr__origins__order_1_rank` ASC,\n" + + " `aggr__origins__distinations__order_1_rank` ASC, `top_hits_rank` ASC", + ExpectedAdditionalPancakeSQLs: []string{ + " WITH quesma_top_hits_group_table AS (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__origins__parent_count`,\n" + + " `OriginAirportID` AS `aggr__origins__key_0`,\n" + + " count(*) AS `aggr__origins__count`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY `OriginAirportID` AS `aggr__origins__key_0`\n" + + " ORDER BY `aggr__origins__count` DESC, `aggr__origins__key_0` ASC\n" + + " LIMIT 10001) ,\n" + + "quesma_top_hits_join AS (\n" + + " SELECT `group_table`.`aggr__origins__parent_count` AS\n" + + " `aggr__origins__parent_count`,\n" + + " `group_table`.`aggr__origins__key_0` AS `aggr__origins__key_0`,\n" + + " `group_table`.`aggr__origins__count` AS `aggr__origins__count`,\n" + + " `hit_table`.`OriginLocation` AS `top_hits__origins__originLocation_col_0`,\n" + + " `hit_table`.`Origin` AS `top_hits__origins__originLocation_col_1`,\n" + 
+ " ROW_NUMBER() OVER (PARTITION BY `group_table`.`aggr__origins__key_0`) AS\n" + + " `top_hits_rank`\n" + + " FROM quesma_top_hits_group_table AS `group_table` LEFT OUTER JOIN\n" + + " `__quesma_table_name` AS `hit_table` ON (`group_table`.`aggr__origins__key_0`=`hit_table`.`OriginAirportID`))\n" + + "SELECT `aggr__origins__parent_count`, `aggr__origins__key_0`,\n" + + " `aggr__origins__count`, `top_hits__origins__originLocation_col_0`,\n" + + " `top_hits__origins__originLocation_col_1`, `top_hits_rank`\n" + + "FROM `quesma_top_hits_join`\n" + + "WHERE `top_hits_rank`<=1\n" + + "ORDER BY `aggr__origins__count` DESC, `aggr__origins__key_0` ASC,\n" + + " `top_hits_rank` ASC", }, }, } diff --git a/platform/testdata/kibana_sample_data_logs.go b/platform/testdata/kibana_sample_data_logs.go index 925a833b8..8f6181325 100644 --- a/platform/testdata/kibana_sample_data_logs.go +++ b/platform/testdata/kibana_sample_data_logs.go @@ -111,12 +111,11 @@ var KibanaSampleDataLogs = []AggregationTestCase{ model.NewQueryResultCol("metric__minAgg_col_0", 0.0), }}, }, - ExpectedPancakeSQL: ` - SELECT maxOrNull("bytes") AS "metric__maxAgg_col_0", - minOrNull("bytes") AS "metric__minAgg_col_0" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1740178800000) AND "timestamp"<= - fromUnixTimestamp64Milli(1740831278103))`, + ExpectedPancakeSQL: "SELECT maxOrNull(`bytes`) AS `metric__maxAgg_col_0`,\n" + + " minOrNull(`bytes`) AS `metric__minAgg_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1740178800000) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740831278103))", }, { // [1] TestName: "Response Codes Over Time + Annotations (1/2 request, Annotations part)", @@ -341,87 +340,84 @@ var KibanaSampleDataLogs = []AggregationTestCase{ model.NewQueryResultCol("aggr__1__2__count", int64(1)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__1__count", - 
toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__1__2__key_0", - count(*) AS "aggr__1__2__count" - FROM __quesma_table_name - WHERE (("timestamp">=fromUnixTimestamp64Milli(1740178800000) AND "timestamp"<= - fromUnixTimestamp64Milli(1740831278103)) AND ("tags" __quesma_match 'error' AND - "tags" __quesma_match 'security')) - GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__1__2__key_0" - ORDER BY "aggr__1__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__1__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS `aggr__1__2__key_0`,\n" + + " count(*) AS `aggr__1__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE ((`timestamp`>=fromUnixTimestamp64Milli(1740178800000) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740831278103)) AND (`tags` __quesma_match 'error' AND\n" + + " `tags` __quesma_match 'security'))\n" + + "GROUP BY toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS `aggr__1__2__key_0`\n" + + "ORDER BY `aggr__1__2__key_0` ASC", ExpectedAdditionalPancakeSQLs: []string{ - ` - WITH quesma_top_hits_group_table AS ( - SELECT sum(count(*)) OVER () AS "aggr__1__count", - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__1__2__key_0", - count(*) AS "aggr__1__2__count" - FROM __quesma_table_name - WHERE (("timestamp">=fromUnixTimestamp64Milli(1740178800000) AND "timestamp"<= - fromUnixTimestamp64Milli(1740831278103)) AND ("tags" __quesma_match 'error' AND - "tags" __quesma_match 'security')) - GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset( - toTimezone("timestamp", 
'Europe/Warsaw'))*1000) / 10800000) AS - "aggr__1__2__key_0" - ORDER BY "aggr__1__2__key_0" ASC) , - quesma_top_hits_join AS ( - SELECT "group_table"."aggr__1__count" AS "aggr__1__count", - "group_table"."aggr__1__2__key_0" AS "aggr__1__2__key_0", - "group_table"."aggr__1__2__count" AS "aggr__1__2__count", - "hit_table"."timestamp" AS "top_metrics__1__2__4_col_0", - "hit_table"."timestamp" AS "top_metrics__1__2__4_col_1", - ROW_NUMBER() OVER (PARTITION BY "group_table"."aggr__1__2__key_0" ORDER BY - "timestamp" ASC) AS "top_hits_rank" - FROM quesma_top_hits_group_table AS "group_table" LEFT OUTER JOIN - __quesma_table_name AS "hit_table" ON ("group_table"."aggr__1__2__key_0"= - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000)) - WHERE (("timestamp">=fromUnixTimestamp64Milli(1740178800000) AND "timestamp"<= - fromUnixTimestamp64Milli(1740831278103)) AND ("tags" __quesma_match 'error' AND - "tags" __quesma_match 'security'))) - SELECT "aggr__1__count", "aggr__1__2__key_0", "aggr__1__2__count", - "top_metrics__1__2__4_col_0", "top_metrics__1__2__4_col_1", "top_hits_rank" - FROM "quesma_top_hits_join" - WHERE "top_hits_rank"<=10 - ORDER BY "aggr__1__2__key_0" ASC, "top_hits_rank" ASC`, - ` - WITH quesma_top_hits_group_table AS ( - SELECT sum(count(*)) OVER () AS "aggr__1__count", - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__1__2__key_0", - count(*) AS "aggr__1__2__count" - FROM __quesma_table_name - WHERE (("timestamp">=fromUnixTimestamp64Milli(1740178800000) AND "timestamp"<= - fromUnixTimestamp64Milli(1740831278103)) AND ("tags" __quesma_match 'error' AND - "tags" __quesma_match 'security')) - GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset( - toTimezone("timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS - "aggr__1__2__key_0" - ORDER BY "aggr__1__2__key_0" ASC) , - 
quesma_top_hits_join AS ( - SELECT "group_table"."aggr__1__count" AS "aggr__1__count", - "group_table"."aggr__1__2__key_0" AS "aggr__1__2__key_0", - "group_table"."aggr__1__2__count" AS "aggr__1__2__count", - "hit_table"."geo.src" AS "top_metrics__1__2__5_col_0", - "hit_table"."timestamp" AS "top_metrics__1__2__5_col_1", - ROW_NUMBER() OVER (PARTITION BY "group_table"."aggr__1__2__key_0" ORDER BY - "timestamp" ASC) AS "top_hits_rank" - FROM quesma_top_hits_group_table AS "group_table" LEFT OUTER JOIN - __quesma_table_name AS "hit_table" ON ("group_table"."aggr__1__2__key_0"= - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000)) - WHERE (("timestamp">=fromUnixTimestamp64Milli(1740178800000) AND "timestamp"<= - fromUnixTimestamp64Milli(1740831278103)) AND ("tags" __quesma_match 'error' AND - "tags" __quesma_match 'security'))) - SELECT "aggr__1__count", "aggr__1__2__key_0", "aggr__1__2__count", - "top_metrics__1__2__5_col_0", "top_metrics__1__2__5_col_1", "top_hits_rank" - FROM "quesma_top_hits_join" - WHERE "top_hits_rank"<=10 - ORDER BY "aggr__1__2__key_0" ASC, "top_hits_rank" ASC`, + " WITH quesma_top_hits_group_table AS (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__1__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS `aggr__1__2__key_0`,\n" + + " count(*) AS `aggr__1__2__count`\n" + + " FROM `__quesma_table_name`\n" + + " WHERE ((`timestamp`>=fromUnixTimestamp64Milli(1740178800000) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740831278103)) AND (`tags` __quesma_match 'error'\n" + + " AND `tags` __quesma_match 'security'))\n" + + " GROUP BY toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(\n" + + " toTimezone(`timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS\n" + + " `aggr__1__2__key_0`\n" + + " ORDER BY `aggr__1__2__key_0` ASC) ,\n" + + "quesma_top_hits_join AS (\n" + 
+ " SELECT `group_table`.`aggr__1__count` AS `aggr__1__count`,\n" + + " `group_table`.`aggr__1__2__key_0` AS `aggr__1__2__key_0`,\n" + + " `group_table`.`aggr__1__2__count` AS `aggr__1__2__count`,\n" + + " `hit_table`.`timestamp` AS `top_metrics__1__2__4_col_0`,\n" + + " `hit_table`.`timestamp` AS `top_metrics__1__2__4_col_1`,\n" + + " ROW_NUMBER() OVER (PARTITION BY `group_table`.`aggr__1__2__key_0` ORDER BY\n" + + " `timestamp` ASC) AS `top_hits_rank`\n" + + " FROM quesma_top_hits_group_table AS `group_table` LEFT OUTER JOIN\n" + + " `__quesma_table_name` AS `hit_table` ON (`group_table`.`aggr__1__2__key_0`=\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000))\n" + + " WHERE ((`timestamp`>=fromUnixTimestamp64Milli(1740178800000) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740831278103)) AND (`tags` __quesma_match 'error'\n" + + " AND `tags` __quesma_match 'security')))\n" + + "SELECT `aggr__1__count`, `aggr__1__2__key_0`, `aggr__1__2__count`,\n" + + " `top_metrics__1__2__4_col_0`, `top_metrics__1__2__4_col_1`, `top_hits_rank`\n" + + "FROM `quesma_top_hits_join`\n" + + "WHERE `top_hits_rank`<=10\n" + + "ORDER BY `aggr__1__2__key_0` ASC, `top_hits_rank` ASC", + " WITH quesma_top_hits_group_table AS (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__1__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS `aggr__1__2__key_0`,\n" + + " count(*) AS `aggr__1__2__count`\n" + + " FROM `__quesma_table_name`\n" + + " WHERE ((`timestamp`>=fromUnixTimestamp64Milli(1740178800000) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740831278103)) AND (`tags` __quesma_match 'error'\n" + + " AND `tags` __quesma_match 'security'))\n" + + " GROUP BY toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(\n" + + " toTimezone(`timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS\n" + + " 
`aggr__1__2__key_0`\n" + + " ORDER BY `aggr__1__2__key_0` ASC) ,\n" + + "quesma_top_hits_join AS (\n" + + " SELECT `group_table`.`aggr__1__count` AS `aggr__1__count`,\n" + + " `group_table`.`aggr__1__2__key_0` AS `aggr__1__2__key_0`,\n" + + " `group_table`.`aggr__1__2__count` AS `aggr__1__2__count`,\n" + + " `hit_table`.`geo.src` AS `top_metrics__1__2__5_col_0`,\n" + + " `hit_table`.`timestamp` AS `top_metrics__1__2__5_col_1`,\n" + + " ROW_NUMBER() OVER (PARTITION BY `group_table`.`aggr__1__2__key_0` ORDER BY\n" + + " `timestamp` ASC) AS `top_hits_rank`\n" + + " FROM quesma_top_hits_group_table AS `group_table` LEFT OUTER JOIN\n" + + " `__quesma_table_name` AS `hit_table` ON (`group_table`.`aggr__1__2__key_0`=\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000))\n" + + " WHERE ((`timestamp`>=fromUnixTimestamp64Milli(1740178800000) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740831278103)) AND (`tags` __quesma_match 'error'\n" + + " AND `tags` __quesma_match 'security')))\n" + + "SELECT `aggr__1__count`, `aggr__1__2__key_0`, `aggr__1__2__count`,\n" + + " `top_metrics__1__2__5_col_0`, `top_metrics__1__2__5_col_1`, `top_hits_rank`\n" + + "FROM `quesma_top_hits_join`\n" + + "WHERE `top_hits_rank`<=10\n" + + "ORDER BY `aggr__1__2__key_0` ASC, `top_hits_rank` ASC", }, ExpectedAdditionalPancakeResults: [][]model.QueryResultRow{ { @@ -560,11 +556,10 @@ var KibanaSampleDataLogs = []AggregationTestCase{ model.NewQueryResultCol("metric__0_col_0", int64(833)), }}, }, - ExpectedPancakeSQL: ` - SELECT uniq("clientip") AS "metric__0_col_0" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1740178800000) AND "timestamp"<= - fromUnixTimestamp64Milli(1740831278103))`, + ExpectedPancakeSQL: "SELECT uniq(`clientip`) AS `metric__0_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1740178800000) AND `timestamp`<= \n" + + " 
fromUnixTimestamp64Milli(1740831278103))", }, { // [3] TestName: "Response Codes Over Time + Annotations (2/2 request)", @@ -853,20 +848,18 @@ var KibanaSampleDataLogs = []AggregationTestCase{ model.NewQueryResultCol("filter_2__aggr__0__1__count", int64(1)), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__0__key_0", - count(*) AS "aggr__0__count", - countIf(("response">=200 AND "response"<400)) AS "filter_0__aggr__0__1__count" - , - countIf(("response">=400 AND "response"<500)) AS "filter_1__aggr__0__1__count" - , countIf("response">=500) AS "filter_2__aggr__0__1__count" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1740178800000) AND "timestamp"<= - fromUnixTimestamp64Milli(1740831278103)) - GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS `aggr__0__key_0`,\n" + + " count(*) AS `aggr__0__count`,\n" + + " countIf((`response`>=200 AND `response`<400)) AS `filter_0__aggr__0__1__count`\n" + + " ,\n" + + " countIf((`response`>=400 AND `response`<500)) AS `filter_1__aggr__0__1__count`\n" + + " , countIf(`response`>=500) AS `filter_2__aggr__0__1__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1740178800000) AND `timestamp`<=fromUnixTimestamp64Milli(1740831278103))\n" + + "GROUP BY toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 10800000) AS `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__key_0` ASC", }, { // [4] TestName: "HTTP 5xx", @@ -986,11 +979,10 @@ var KibanaSampleDataLogs = []AggregationTestCase{ 
model.NewQueryResultCol("metric__0-bucket_col_0", int64(63)), }}, }, - ExpectedPancakeSQL: ` - SELECT countIf("response">=500) AS "metric__0-bucket_col_0" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1740178800000) AND "timestamp"<= - fromUnixTimestamp64Milli(1740831278103))`, + ExpectedPancakeSQL: "SELECT countIf(`response`>=500) AS `metric__0-bucket_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1740178800000) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740831278103))", }, { // [5] TestName: "HTTP 4xx", @@ -1130,11 +1122,10 @@ var KibanaSampleDataLogs = []AggregationTestCase{ model.NewQueryResultCol("metric__0-bucket_col_0", int64(72)), }}, }, - ExpectedPancakeSQL: ` - SELECT countIf(("response">=400 AND "response"<500)) AS "metric__0-bucket_col_0" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1740178800000) AND "timestamp"<= - fromUnixTimestamp64Milli(1740831278103))`, + ExpectedPancakeSQL: "SELECT countIf((`response`>=400 AND `response`<500)) AS `metric__0-bucket_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1740178800000) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740831278103))", }, { // [6] TestName: "Table gz, css, zip, etc.", @@ -1363,38 +1354,33 @@ var KibanaSampleDataLogs = []AggregationTestCase{ model.NewQueryResultCol("metric__0__4-bucket__4-metric_col_0", 0), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "extension" AS "aggr__0__key_0", count(*) AS "aggr__0__count", - sumOrNull("bytes") AS "metric__0__1_col_0", - uniq("clientip") AS "metric__0__3_col_0", - countIf(("timestamp">=fromUnixTimestamp64Milli(1740749972445) AND "timestamp" - <=fromUnixTimestamp64Milli(1740753572445))) AS "aggr__0__2-bucket__count", - sumOrNullIf("bytes", ("timestamp">=fromUnixTimestamp64Milli(1740749972445) AND - 
"timestamp"<=fromUnixTimestamp64Milli(1740753572445))) AS - "metric__0__2-bucket__2-metric_col_0" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1740092400000) AND "timestamp"<= - fromUnixTimestamp64Milli(1740753572445)) - GROUP BY "extension" AS "aggr__0__key_0" - ORDER BY "metric__0__1_col_0" DESC, "aggr__0__key_0" ASC - LIMIT 11`, - ExpectedAdditionalPancakeSQLs: []string{` - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "extension" AS "aggr__0__key_0", count(*) AS "aggr__0__count", - sumOrNull("bytes") AS "metric__0__1_col_0", - uniq("clientip") AS "metric__0__3_col_0", - countIf(("timestamp">=fromUnixTimestamp64Milli(1740749972445) AND "timestamp" - <=fromUnixTimestamp64Milli(1740753572445))) AS "aggr__0__4-bucket__count", - uniqIf("clientip", ("timestamp">=fromUnixTimestamp64Milli(1740749972445) AND - "timestamp"<=fromUnixTimestamp64Milli(1740753572445))) AS - "metric__0__4-bucket__4-metric_col_0" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1740092400000) AND "timestamp"<= - fromUnixTimestamp64Milli(1740753572445)) - GROUP BY "extension" AS "aggr__0__key_0" - ORDER BY "metric__0__1_col_0" DESC, "aggr__0__key_0" ASC - LIMIT 11`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n" + + " `extension` AS `aggr__0__key_0`, count(*) AS `aggr__0__count`,\n" + + " sumOrNull(`bytes`) AS `metric__0__1_col_0`,\n" + + " uniq(`clientip`) AS `metric__0__3_col_0`,\n" + + " countIf((`timestamp`>=fromUnixTimestamp64Milli(1740749972445) AND `timestamp`<=fromUnixTimestamp64Milli(1740753572445))) AS `aggr__0__2-bucket__count`,\n" + + " sumOrNullIf(`bytes`, (`timestamp`>=fromUnixTimestamp64Milli(1740749972445) AND\n" + + " `timestamp`<=fromUnixTimestamp64Milli(1740753572445))) AS\n" + + " `metric__0__2-bucket__2-metric_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1740092400000) AND 
`timestamp`<=fromUnixTimestamp64Milli(1740753572445))\n" + + "GROUP BY `extension` AS `aggr__0__key_0`\n" + + "ORDER BY `metric__0__1_col_0` DESC, `aggr__0__key_0` ASC\n" + + "LIMIT 11", + ExpectedAdditionalPancakeSQLs: []string{ + "SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n" + + " `extension` AS `aggr__0__key_0`, count(*) AS `aggr__0__count`,\n" + + " sumOrNull(`bytes`) AS `metric__0__1_col_0`,\n" + + " uniq(`clientip`) AS `metric__0__3_col_0`,\n" + + " countIf((`timestamp`>=fromUnixTimestamp64Milli(1740749972445) AND `timestamp`<=fromUnixTimestamp64Milli(1740753572445))) AS `aggr__0__4-bucket__count`,\n" + + " uniqIf(`clientip`, (`timestamp`>=fromUnixTimestamp64Milli(1740749972445) AND\n" + + " `timestamp`<=fromUnixTimestamp64Milli(1740753572445))) AS\n" + + " `metric__0__4-bucket__4-metric_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1740092400000) AND `timestamp`<=fromUnixTimestamp64Milli(1740753572445))\n" + + "GROUP BY `extension` AS `aggr__0__key_0`\n" + + "ORDER BY `metric__0__1_col_0` DESC, `aggr__0__key_0` ASC\n" + + "LIMIT 11", }, ExpectedAdditionalPancakeResults: [][]model.QueryResultRow{ { @@ -1680,20 +1666,19 @@ var KibanaSampleDataLogs = []AggregationTestCase{ model.NewQueryResultCol("metric__0__8_col_0", []float64{5929.5}), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", - "url" AS "aggr__0__key_0", count(*) AS "aggr__0__count", - uniq("clientip") AS "metric__0__2_col_0", - countIf("response">=500) AS "metric__0__3-bucket_col_0", - countIf(("response">=400 AND "response"<500)) AS "metric__0__5-bucket_col_0", - quantiles(0.950000)("bytes") AS "metric__0__7_col_0", - quantiles(0.500000)("bytes") AS "metric__0__8_col_0" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1740178800000) AND "timestamp"<= - fromUnixTimestamp64Milli(1740831278103)) - GROUP BY "url" AS "aggr__0__key_0" - ORDER BY "aggr__0__count" DESC, 
"aggr__0__key_0" ASC - LIMIT 1001`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__0__parent_count`,\n" + + " `url` AS `aggr__0__key_0`, count(*) AS `aggr__0__count`,\n" + + " uniq(`clientip`) AS `metric__0__2_col_0`,\n" + + " countIf(`response`>=500) AS `metric__0__3-bucket_col_0`,\n" + + " countIf((`response`>=400 AND `response`<500)) AS `metric__0__5-bucket_col_0`,\n" + + " quantiles(0.950000)(`bytes`) AS `metric__0__7_col_0`,\n" + + " quantiles(0.500000)(`bytes`) AS `metric__0__8_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1740178800000) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740831278103))\n" + + "GROUP BY `url` AS `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__count` DESC, `aggr__0__key_0` ASC\n" + + "LIMIT 1001", }, { // [8] TestName: "Total Requests and Bytes (1/2 request)", @@ -1883,28 +1868,26 @@ var KibanaSampleDataLogs = []AggregationTestCase{ model.NewQueryResultCol("metric__gridSplit__sum_of_bytes_col_0", float64(450382.0)), }}, }, - ExpectedPancakeSQL: ` - SELECT FLOOR(((__quesma_geo_lon("geo.coordinates")+180)/360)*POWER(2, 6)) - AS "aggr__gridSplit__key_0", - FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat("geo.coordinates")))+(1/COS(RADIANS( - __quesma_geo_lat("geo.coordinates")))))/PI())/2*POWER(2, 6)) - AS "aggr__gridSplit__key_1", count(*) AS "aggr__gridSplit__count", - avgOrNull(__quesma_geo_lat("geo_coordinates")) AS - "metric__gridSplit__gridCentroid_col_0", - avgOrNull(__quesma_geo_lon("geo_coordinates")) AS - "metric__gridSplit__gridCentroid_col_1", - count(*) AS "metric__gridSplit__gridCentroid_col_2", - sumOrNull("bytes") AS "metric__gridSplit__sum_of_bytes_col_0" - FROM __quesma_table_name - WHERE ("geo.coordinates" IS NOT NULL AND ("timestamp">=fromUnixTimestamp64Milli( - 1740178800000) AND "timestamp"<=fromUnixTimestamp64Milli(1740831278103))) - GROUP BY FLOOR(((__quesma_geo_lon("geo.coordinates")+180)/360)*POWER(2, 6)) - AS "aggr__gridSplit__key_0", - 
FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat("geo.coordinates")))+(1/COS( - RADIANS(__quesma_geo_lat("geo.coordinates")))))/PI())/2*POWER(2, 6)) AS "aggr__gridSplit__key_1" - ORDER BY "aggr__gridSplit__count" DESC, "aggr__gridSplit__key_0" ASC, - "aggr__gridSplit__key_1" ASC - LIMIT 65535`, + ExpectedPancakeSQL: "SELECT FLOOR(((__quesma_geo_lon(`geo.coordinates`)+180)/360)*POWER(2, 6))\n" + + " AS `aggr__gridSplit__key_0`,\n" + + " FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat(`geo.coordinates`)))+(1/COS(RADIANS(\n" + + " __quesma_geo_lat(`geo.coordinates`)))))/PI())/2*POWER(2, 6)) AS `aggr__gridSplit__key_1`, count(*) AS `aggr__gridSplit__count`,\n" + + " avgOrNull(__quesma_geo_lat(`geo_coordinates`)) AS\n" + + " `metric__gridSplit__gridCentroid_col_0`,\n" + + " avgOrNull(__quesma_geo_lon(`geo_coordinates`)) AS\n" + + " `metric__gridSplit__gridCentroid_col_1`,\n" + + " count(*) AS `metric__gridSplit__gridCentroid_col_2`,\n" + + " sumOrNull(`bytes`) AS `metric__gridSplit__sum_of_bytes_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`geo.coordinates` IS NOT NULL AND (`timestamp`>=fromUnixTimestamp64Milli(\n" + + " 1740178800000) AND `timestamp`<=fromUnixTimestamp64Milli(1740831278103)))\n" + + "GROUP BY FLOOR(((__quesma_geo_lon(`geo.coordinates`)+180)/360)*POWER(2, 6))\n" + + " AS `aggr__gridSplit__key_0`,\n" + + " FLOOR((1-LOG(TAN(RADIANS(__quesma_geo_lat(`geo.coordinates`)))+(1/COS(\n" + + " RADIANS(__quesma_geo_lat(`geo.coordinates`)))))/PI())/2*POWER(2, 6)) AS `aggr__gridSplit__key_1`\n" + + "ORDER BY `aggr__gridSplit__count` DESC, `aggr__gridSplit__key_0` ASC,\n" + + " `aggr__gridSplit__key_1` ASC\n" + + "LIMIT 65535", }, { // [9] TestName: "Total Requests and Bytes (2/2 request)", @@ -2020,15 +2003,14 @@ var KibanaSampleDataLogs = []AggregationTestCase{ model.NewQueryResultCol("aggr__join__count", int64(260)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__join__parent_count", - "geo.dest" AS "aggr__join__key_0", count(*) AS 
"aggr__join__count" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1740178800000) AND "timestamp"<= - fromUnixTimestamp64Milli(1740831278103)) - GROUP BY "geo.dest" AS "aggr__join__key_0" - ORDER BY "aggr__join__count" DESC, "aggr__join__key_0" ASC - LIMIT 65536`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__join__parent_count`,\n" + + " `geo.dest` AS `aggr__join__key_0`, count(*) AS `aggr__join__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1740178800000) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740831278103))\n" + + "GROUP BY `geo.dest` AS `aggr__join__key_0`\n" + + "ORDER BY `aggr__join__count` DESC, `aggr__join__key_0` ASC\n" + + "LIMIT 65536", }, { // [10] TestName: "Unique Destination Heatmap", @@ -2191,35 +2173,34 @@ var KibanaSampleDataLogs = []AggregationTestCase{ model.NewQueryResultCol("metric__countries__hours__unique_col_0", int64(2)), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__countries__parent_count", "aggr__countries__key_0", - "aggr__countries__count", "aggr__countries__hours__key_0", - "aggr__countries__hours__count", "metric__countries__hours__unique_col_0" - FROM ( - SELECT "aggr__countries__parent_count", "aggr__countries__key_0", - "aggr__countries__count", "aggr__countries__hours__key_0", - "aggr__countries__hours__count", "metric__countries__hours__unique_col_0", - dense_rank() OVER (ORDER BY "aggr__countries__count" DESC, - "aggr__countries__key_0" ASC) AS "aggr__countries__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__countries__key_0" ORDER BY - "aggr__countries__hours__key_0" ASC) AS - "aggr__countries__hours__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__countries__parent_count", - "geo.dest" AS "aggr__countries__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__countries__key_0") AS - "aggr__countries__count", - "hour_of_day" AS "aggr__countries__hours__key_0", - count(*) AS 
"aggr__countries__hours__count", - uniq("clientip") AS "metric__countries__hours__unique_col_0" - FROM __quesma_table_name - WHERE ("@timestamp">=fromUnixTimestamp64Milli(1740178800000) AND - "@timestamp"<=fromUnixTimestamp64Milli(1740831278103)) - GROUP BY "geo.dest" AS "aggr__countries__key_0", - "hour_of_day" AS "aggr__countries__hours__key_0")) - WHERE "aggr__countries__order_1_rank"<=26 - ORDER BY "aggr__countries__order_1_rank" ASC, - "aggr__countries__hours__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__countries__parent_count`, `aggr__countries__key_0`,\n" + + " `aggr__countries__count`, `aggr__countries__hours__key_0`,\n" + + " `aggr__countries__hours__count`, `metric__countries__hours__unique_col_0`\n" + + "FROM (\n" + + " SELECT `aggr__countries__parent_count`, `aggr__countries__key_0`,\n" + + " `aggr__countries__count`, `aggr__countries__hours__key_0`,\n" + + " `aggr__countries__hours__count`, `metric__countries__hours__unique_col_0`,\n" + + " dense_rank() OVER (ORDER BY `aggr__countries__count` DESC,\n" + + " `aggr__countries__key_0` ASC) AS `aggr__countries__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__countries__key_0` ORDER BY\n" + + " `aggr__countries__hours__key_0` ASC) AS\n" + + " `aggr__countries__hours__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__countries__parent_count`,\n" + + " `geo.dest` AS `aggr__countries__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__countries__key_0`) AS\n" + + " `aggr__countries__count`,\n" + + " `hour_of_day` AS `aggr__countries__hours__key_0`,\n" + + " count(*) AS `aggr__countries__hours__count`,\n" + + " uniq(`clientip`) AS `metric__countries__hours__unique_col_0`\n" + + " FROM `__quesma_table_name`\n" + + " WHERE (`@timestamp`>=fromUnixTimestamp64Milli(1740178800000) AND\n" + + " `@timestamp`<=fromUnixTimestamp64Milli(1740831278103))\n" + + " GROUP BY `geo.dest` AS `aggr__countries__key_0`,\n" + + " `hour_of_day` AS 
`aggr__countries__hours__key_0`))\n" + + "WHERE `aggr__countries__order_1_rank`<=26\n" + + "ORDER BY `aggr__countries__order_1_rank` ASC,\n" + + " `aggr__countries__hours__order_1_rank` ASC", }, { // [11] TestName: "TODO Machine OS and Destination Sankey Chart", @@ -2345,17 +2326,16 @@ var KibanaSampleDataLogs = []AggregationTestCase{ model.NewQueryResultCol("aggr__table__count", int64(11)), }}, }, - ExpectedPancakeSQL: ` - SELECT "machine.os" AS "aggr__table__key_0", "geo.dest" AS "aggr__table__key_1", - count(*) AS "aggr__table__count" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1740178800000) AND "timestamp"<= - fromUnixTimestamp64Milli(1740831278103)) - GROUP BY "machine.os" AS "aggr__table__key_0", - "geo.dest" AS "aggr__table__key_1" - ORDER BY "aggr__table__count" DESC, "aggr__table__key_0" ASC, - "aggr__table__key_1" ASC - LIMIT 3`, + ExpectedPancakeSQL: "SELECT `machine.os` AS `aggr__table__key_0`, `geo.dest` AS `aggr__table__key_1`,\n" + + " count(*) AS `aggr__table__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1740178800000) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740831278103))\n" + + "GROUP BY `machine.os` AS `aggr__table__key_0`,\n" + + " `geo.dest` AS `aggr__table__key_1`\n" + + "ORDER BY `aggr__table__count` DESC, `aggr__table__key_0` ASC,\n" + + " `aggr__table__key_1` ASC\n" + + "LIMIT 3", }, { // [12] TestName: "Bytes distribution", @@ -2479,12 +2459,11 @@ var KibanaSampleDataLogs = []AggregationTestCase{ model.NewQueryResultCol("aggr__0__count", int64(2)), }}, }, - ExpectedPancakeSQL: ` - SELECT floor("bytes"/50)*50 AS "aggr__0__key_0", count(*) AS "aggr__0__count" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1740178800000) AND "timestamp"<= - fromUnixTimestamp64Milli(1740831278103)) - GROUP BY floor("bytes"/50)*50 AS "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, + ExpectedPancakeSQL: "SELECT floor(`bytes`/50)*50 AS 
`aggr__0__key_0`, count(*) AS `aggr__0__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1740178800000) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1740831278103))\n" + + "GROUP BY floor(`bytes`/50)*50 AS `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__key_0` ASC", }, } diff --git a/platform/testdata/opensearch-visualize/aggregation_requests.go b/platform/testdata/opensearch-visualize/aggregation_requests.go index efb3dc528..8b71bec0c 100644 --- a/platform/testdata/opensearch-visualize/aggregation_requests.go +++ b/platform/testdata/opensearch-visualize/aggregation_requests.go @@ -129,16 +129,15 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("range_1__metric__2__1_col_0", 1), }}, }, - ExpectedPancakeSQL: ` - SELECT countIf("ftd_session_time"<1000) AS "range_0__aggr__2__count", - uniqIf("ftd_session_time", "ftd_session_time"<1000) AS - "range_0__metric__2__1_col_0", - countIf("ftd_session_time">=-100) AS "range_1__aggr__2__count", - uniqIf("ftd_session_time", "ftd_session_time">=-100) AS - "range_1__metric__2__1_col_0" - FROM ` + TableName + ` - WHERE ("epoch_time">='2024-04-27T14:25:59.383Z' AND "epoch_time"<= - '2024-04-27T14:40:59.383Z')`, + ExpectedPancakeSQL: "SELECT countIf(`ftd_session_time`<1000) AS `range_0__aggr__2__count`,\n" + + " uniqIf(`ftd_session_time`, `ftd_session_time`<1000) AS\n" + + " `range_0__metric__2__1_col_0`,\n" + + " countIf(`ftd_session_time`>=-100) AS `range_1__aggr__2__count`,\n" + + " uniqIf(`ftd_session_time`, `ftd_session_time`>=-100) AS\n" + + " `range_1__metric__2__1_col_0`\n" + + "FROM `" + TableName + "`\n" + + "WHERE (`epoch_time`>='2024-04-27T14:25:59.383Z' AND `epoch_time`<=\n" + + " '2024-04-27T14:40:59.383Z')", }, { // [1] TestName: "Range with subaggregations. 
Reproduce: Visualize -> Pie chart -> Aggregation: Top Hit, Buckets: Aggregation: Range", @@ -319,12 +318,11 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("range_1__aggr__2__count", uint64(1880)), }}, }, - ExpectedPancakeSQL: ` - SELECT countIf("properties.entry_time"<1000) AS "range_0__aggr__2__count", - countIf("properties.entry_time">=-100) AS "range_1__aggr__2__count" - FROM __quesma_table_name - WHERE ("epoch_time">='2024-04-27T14:38:33.527Z' AND "epoch_time"<= - '2024-04-27T14:53:33.527Z')`, + ExpectedPancakeSQL: "SELECT countIf(`properties.entry_time`<1000) AS `range_0__aggr__2__count`,\n" + + " countIf(`properties.entry_time`>=-100) AS `range_1__aggr__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`epoch_time`>='2024-04-27T14:38:33.527Z' AND `epoch_time`<=\n" + + " '2024-04-27T14:53:33.527Z')", ExpectedAdditionalPancakeResults: [][]model.QueryResultRow{ {{}}, // 0 results { @@ -332,17 +330,17 @@ var AggregationTests = []testdata.AggregationTestCase{ {Cols: []model.QueryResultCol{model.NewQueryResultCol("top_hits__2__1_col_0", uint64(1704129696028))}}, }, }, - ExpectedAdditionalPancakeSQLs: []string{` - SELECT "properties.entry_time" AS "top_hits__2__1_col_0" - FROM __quesma_table_name - WHERE ("properties.entry_time"<1000 AND ("epoch_time">= - '2024-04-27T14:38:33.527Z' AND "epoch_time"<='2024-04-27T14:53:33.527Z')) - LIMIT 2`, ` - SELECT "properties.entry_time" AS "top_hits__2__1_col_0" - FROM __quesma_table_name - WHERE ("properties.entry_time">=-100 AND ("epoch_time">= - '2024-04-27T14:38:33.527Z' AND "epoch_time"<='2024-04-27T14:53:33.527Z')) - LIMIT 2`, + ExpectedAdditionalPancakeSQLs: []string{ + "SELECT `properties.entry_time` AS `top_hits__2__1_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`properties.entry_time`<1000 AND (`epoch_time`>=\n" + + " '2024-04-27T14:38:33.527Z' AND `epoch_time`<='2024-04-27T14:53:33.527Z'))\n" + + "LIMIT 2", + "SELECT `properties.entry_time` AS 
`top_hits__2__1_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`properties.entry_time`>=-100 AND (`epoch_time`>=\n" + + " '2024-04-27T14:38:33.527Z' AND `epoch_time`<='2024-04-27T14:53:33.527Z'))\n" + + "LIMIT 2", }, }, { // [2] @@ -475,17 +473,16 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("range_1__metric__2__1_col_0", 7460679809210584.0), }}, }, - ExpectedPancakeSQL: ` - SELECT countIf(("epoch_time_original">=0 AND "epoch_time_original"<1000)) AS - "range_0__aggr__2__count", - sumOrNullIf("properties.entry_time", ("epoch_time_original">=0 AND - "epoch_time_original"<1000)) AS "range_0__metric__2__1_col_0", - countIf("epoch_time_original">=1000) AS "range_1__aggr__2__count", - sumOrNullIf("properties.entry_time", "epoch_time_original">=1000) AS - "range_1__metric__2__1_col_0" - FROM ` + TableName + ` - WHERE ("epoch_time">='2024-04-28T14:34:22.674Z' AND "epoch_time"<= - '2024-04-28T14:49:22.674Z')`, + ExpectedPancakeSQL: "SELECT countIf((`epoch_time_original`>=0 AND `epoch_time_original`<1000)) AS\n" + + " `range_0__aggr__2__count`,\n" + + " sumOrNullIf(`properties.entry_time`, (`epoch_time_original`>=0 AND\n" + + " `epoch_time_original`<1000)) AS `range_0__metric__2__1_col_0`,\n" + + " countIf(`epoch_time_original`>=1000) AS `range_1__aggr__2__count`,\n" + + " sumOrNullIf(`properties.entry_time`, `epoch_time_original`>=1000) AS\n" + + " `range_1__metric__2__1_col_0`\n" + + "FROM `" + TableName + "`\n" + + "WHERE (`epoch_time`>='2024-04-28T14:34:22.674Z' AND `epoch_time`<=\n" + + " '2024-04-28T14:49:22.674Z')", }, { // [3] TestName: "Range with subaggregations. 
Reproduce: Visualize -> Heat Map -> Metrics: Median, Buckets: X-Asis Range", @@ -614,24 +611,23 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("range_1__metric__2__1_col_0", []float64{math.NaN()}), }}, }, - ExpectedPancakeSQL: ` - SELECT countIf(("properties::exoestimation_connection_speedinkbps">=0 AND - "properties::exoestimation_connection_speedinkbps"<1000)) AS - "range_0__aggr__2__count", - quantilesIf(0.500000)("properties::entry_time", ( - "properties::exoestimation_connection_speedinkbps">=0 AND - "properties::exoestimation_connection_speedinkbps"<1000)) AS - "range_0__metric__2__1_col_0", - countIf(("properties::exoestimation_connection_speedinkbps">=1000 AND - "properties::exoestimation_connection_speedinkbps"<2000)) AS - "range_1__aggr__2__count", - quantilesIf(0.500000)("properties::entry_time", ( - "properties::exoestimation_connection_speedinkbps">=1000 AND - "properties::exoestimation_connection_speedinkbps"<2000)) AS - "range_1__metric__2__1_col_0" - FROM ` + TableName + ` - WHERE ("epoch_time">='2024-04-18T04:40:12.252Z' AND "epoch_time"<= - '2024-05-03T04:40:12.252Z')`, + ExpectedPancakeSQL: "SELECT countIf((`properties::exoestimation_connection_speedinkbps`>=0 AND\n" + + " `properties::exoestimation_connection_speedinkbps`<1000)) AS\n" + + " `range_0__aggr__2__count`,\n" + + " quantilesIf(0.500000)(`properties::entry_time`, (\n" + + " `properties::exoestimation_connection_speedinkbps`>=0 AND\n" + + " `properties::exoestimation_connection_speedinkbps`<1000)) AS\n" + + " `range_0__metric__2__1_col_0`,\n" + + " countIf((`properties::exoestimation_connection_speedinkbps`>=1000 AND\n" + + " `properties::exoestimation_connection_speedinkbps`<2000)) AS\n" + + " `range_1__aggr__2__count`,\n" + + " quantilesIf(0.500000)(`properties::entry_time`, (\n" + + " `properties::exoestimation_connection_speedinkbps`>=1000 AND\n" + + " `properties::exoestimation_connection_speedinkbps`<2000)) AS\n" + + " 
`range_1__metric__2__1_col_0`\n" + + "FROM `" + TableName + "`\n" + + "WHERE (`epoch_time`>='2024-04-18T04:40:12.252Z' AND `epoch_time`<=\n" + + " '2024-05-03T04:40:12.252Z')", }, { // [4] TestName: "Max on DateTime field. Reproduce: Visualize -> Line: Metrics -> Max @timestamp, Buckets: Add X-Asis, Aggregation: Significant Terms", @@ -766,16 +762,15 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__2__1_col_0", util.ParseTime("2024-05-02T15:59:12.949Z")), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__2__parent_count", - "response" AS "aggr__2__key_0", count(*) AS "aggr__2__count", - maxOrNull("timestamp") AS "metric__2__1_col_0" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1713401399517) AND "timestamp"<= - fromUnixTimestamp64Milli(1714697399517)) - GROUP BY "response" AS "aggr__2__key_0" - ORDER BY "aggr__2__count" DESC, "aggr__2__key_0" ASC - LIMIT 4`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__2__parent_count`,\n" + + " `response` AS `aggr__2__key_0`, count(*) AS `aggr__2__count`,\n" + + " maxOrNull(`timestamp`) AS `metric__2__1_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1713401399517) AND `timestamp`<=\n" + + " fromUnixTimestamp64Milli(1714697399517))\n" + + "GROUP BY `response` AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__count` DESC, `aggr__2__key_0` ASC\n" + + "LIMIT 4", }, { // [5] TestName: "Min on DateTime field. 
Reproduce: Visualize -> Line: Metrics -> Min @timestamp, Buckets: Add X-Asis, Aggregation: Significant Terms", @@ -910,16 +905,15 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__2__1_col_0", util.ParseTime("2024-04-21T03:30:25.131Z")), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__2__parent_count", - "response" AS "aggr__2__key_0", count(*) AS "aggr__2__count", - minOrNull("timestamp") AS "metric__2__1_col_0" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1713401460471) AND "timestamp"<= - fromUnixTimestamp64Milli(1714697460471)) - GROUP BY "response" AS "aggr__2__key_0" - ORDER BY "aggr__2__count" DESC, "aggr__2__key_0" ASC - LIMIT 4`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__2__parent_count`,\n" + + " `response` AS `aggr__2__key_0`, count(*) AS `aggr__2__count`,\n" + + " minOrNull(`timestamp`) AS `metric__2__1_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1713401460471) AND `timestamp`<=\n" + + " fromUnixTimestamp64Milli(1714697460471))\n" + + "GROUP BY `response` AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__count` DESC, `aggr__2__key_0` ASC\n" + + "LIMIT 4", }, { // [6] TestName: "Percentiles on DateTime field. 
Reproduce: Visualize -> Line: Metrics -> Percentiles (or Median, it's the same aggregation) @timestamp, Buckets: Add X-Asis, Aggregation: Significant Terms", @@ -1081,22 +1075,21 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__2__1_col_6", []time.Time{util.ParseTime("2024-05-02T16:09:28.003Z")}), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__2__parent_count", - "response" AS "aggr__2__key_0", count(*) AS "aggr__2__count", - quantiles(0.010000)("timestamp") AS "metric__2__1_col_0", - quantiles(0.020000)("timestamp") AS "metric__2__1_col_1", - quantiles(0.250000)("timestamp") AS "metric__2__1_col_2", - quantiles(0.500000)("timestamp") AS "metric__2__1_col_3", - quantiles(0.750000)("timestamp") AS "metric__2__1_col_4", - quantiles(0.950000)("timestamp") AS "metric__2__1_col_5", - quantiles(0.990000)("timestamp") AS "metric__2__1_col_6" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1713401475845) AND "timestamp"<= - fromUnixTimestamp64Milli(1714697475845)) - GROUP BY "response" AS "aggr__2__key_0" - ORDER BY "aggr__2__count" DESC, "aggr__2__key_0" ASC - LIMIT 4`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__2__parent_count`,\n" + + " `response` AS `aggr__2__key_0`, count(*) AS `aggr__2__count`,\n" + + " quantiles(0.010000)(`timestamp`) AS `metric__2__1_col_0`,\n" + + " quantiles(0.020000)(`timestamp`) AS `metric__2__1_col_1`,\n" + + " quantiles(0.250000)(`timestamp`) AS `metric__2__1_col_2`,\n" + + " quantiles(0.500000)(`timestamp`) AS `metric__2__1_col_3`,\n" + + " quantiles(0.750000)(`timestamp`) AS `metric__2__1_col_4`,\n" + + " quantiles(0.950000)(`timestamp`) AS `metric__2__1_col_5`,\n" + + " quantiles(0.990000)(`timestamp`) AS `metric__2__1_col_6`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1713401475845) AND `timestamp`<=\n" + + " fromUnixTimestamp64Milli(1714697475845))\n" + + "GROUP BY `response` AS 
`aggr__2__key_0`\n" + + "ORDER BY `aggr__2__count` DESC, `aggr__2__key_0` ASC\n" + + "LIMIT 4", }, { // [7] TestName: "Percentile_ranks keyed=false. Reproduce: Visualize -> Line -> Metrics: Percentile Ranks, Buckets: X-Asis Date Histogram", @@ -1232,16 +1225,15 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__2__1_col_1", 50.0), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 3600000) AS "aggr__2__key_0", - count(*) AS "aggr__2__count", - countIf("AvgTicketPrice"<=0)/count(*)*100 AS "metric__2__1_col_0", - countIf("AvgTicketPrice"<=50000)/count(*)*100 AS "metric__2__1_col_1" - FROM ` + TableName + ` - GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 3600000) AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 3600000) AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`,\n" + + " countIf(`AvgTicketPrice`<=0)/count(*)*100 AS `metric__2__1_col_0`,\n" + + " countIf(`AvgTicketPrice`<=50000)/count(*)*100 AS `metric__2__1_col_1`\n" + + "FROM `" + TableName + "`\n" + + "GROUP BY toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 3600000) AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [8] TestName: "Min/max with simple script. 
Reproduce: Visualize -> Line -> Metrics: Count, Buckets: X-Asis Histogram", @@ -1341,10 +1333,9 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__minAgg_col_0", 0.0), }}, }, - ExpectedPancakeSQL: ` - SELECT maxOrNull(toHour("timestamp")) AS "metric__maxAgg_col_0", - minOrNull(toHour("timestamp")) AS "metric__minAgg_col_0" - FROM ` + TableName + ``, + ExpectedPancakeSQL: "SELECT maxOrNull(toHour(`timestamp`)) AS `metric__maxAgg_col_0`,\n" + + " minOrNull(toHour(`timestamp`)) AS `metric__minAgg_col_0`\n" + + "FROM `" + TableName + "`", }, { // [9] TestName: "Histogram with simple script. Reproduce: Visualize -> Line -> Metrics: Count, Buckets: X-Asis Histogram", @@ -1456,10 +1447,9 @@ var AggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", 34), }}, }, - ExpectedPancakeSQL: ` - SELECT toHour("timestamp") AS "aggr__2__key_0", count(*) AS "aggr__2__count" - FROM ` + TableName + ` - GROUP BY toHour("timestamp") AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toHour(`timestamp`) AS `aggr__2__key_0`, count(*) AS `aggr__2__count`\n" + + "FROM `" + TableName + "`\n" + + "GROUP BY toHour(`timestamp`) AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, } diff --git a/platform/testdata/opensearch-visualize/pipeline_aggregation_requests.go b/platform/testdata/opensearch-visualize/pipeline_aggregation_requests.go index 6c2a5bec7..961e3455a 100644 --- a/platform/testdata/opensearch-visualize/pipeline_aggregation_requests.go +++ b/platform/testdata/opensearch-visualize/pipeline_aggregation_requests.go @@ -125,13 +125,12 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__order_1", 1.0), }}, }, - ExpectedPancakeSQL: ` - SELECT "day_of_week_i" AS "aggr__2__key_0", count(*) AS "aggr__2__count" - FROM __quesma_table_name - WHERE ("order_date">=fromUnixTimestamp64Milli(1706095390802) AND "order_date"<= - 
fromUnixTimestamp64Milli(1715163790802)) - GROUP BY "day_of_week_i" AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT `day_of_week_i` AS `aggr__2__key_0`, count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`order_date`>=fromUnixTimestamp64Milli(1706095390802) AND `order_date`<= \n" + + " fromUnixTimestamp64Milli(1715163790802))\n" + + "GROUP BY `day_of_week_i` AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [1] TestName: "Cumulative sum with other aggregation. Reproduce: Visualize -> Vertical Bar: Metrics: Cumulative Sum (Aggregation: Average), Buckets: Histogram", @@ -253,12 +252,11 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__2__1-metric_col_0", 1.0), }}, }, - ExpectedPancakeSQL: ` - SELECT "day_of_week_i" AS "aggr__2__key_0", count(*) AS "aggr__2__count", - avgOrNull("day_of_week_i") AS "metric__2__1-metric_col_0" - FROM __quesma_table_name - GROUP BY "day_of_week_i" AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT `day_of_week_i` AS `aggr__2__key_0`, count(*) AS `aggr__2__count`,\n" + + " avgOrNull(`day_of_week_i`) AS `metric__2__1-metric_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY `day_of_week_i` AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [2] TestName: "Cumulative sum to other cumulative sum. 
Reproduce: Visualize -> Vertical Bar: Metrics: Cumulative Sum (Aggregation: Cumulative Sum (Aggregation: Count)), Buckets: Histogram", @@ -376,11 +374,10 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", uint64(300)), }}, }, - ExpectedPancakeSQL: ` - SELECT "day_of_week_i" AS "aggr__2__key_0", count(*) AS "aggr__2__count" - FROM __quesma_table_name - GROUP BY "day_of_week_i" AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT `day_of_week_i` AS `aggr__2__key_0`, count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY `day_of_week_i` AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [3] TestName: "Cumulative sum - quite complex, a graph of pipelines. Reproduce: Visualize -> Vertical Bar: Metrics: Cumulative Sum (Aggregation: Cumulative Sum (Aggregation: Max)), Buckets: Histogram", @@ -511,12 +508,11 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__2__1-metric-metric_col_0", 200.0), }}, }, - ExpectedPancakeSQL: ` - SELECT "day_of_week_i" AS "aggr__2__key_0", count(*) AS "aggr__2__count", - maxOrNull("products.base_price") AS "metric__2__1-metric-metric_col_0" - FROM __quesma_table_name - GROUP BY "day_of_week_i" AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT `day_of_week_i` AS `aggr__2__key_0`, count(*) AS `aggr__2__count`,\n" + + " maxOrNull(`products.base_price`) AS `metric__2__1-metric-metric_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY `day_of_week_i` AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [4] TestName: "Simplest Derivative (count). 
Reproduce: Visualize -> Vertical Bar: Metrics: Derivative (Aggregation: Count), Buckets: Histogram", @@ -641,12 +637,11 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", uint64(21)), }}, }, - ExpectedPancakeSQL: ` - SELECT floor("bytes"/200)*200 AS "aggr__2__key_0", - count(*) AS "aggr__2__count" - FROM __quesma_table_name - GROUP BY floor("bytes"/200)*200 AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT floor(`bytes`/200)*200 AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY floor(`bytes`/200)*200 AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [5] TestName: "Derivative with other aggregation. Reproduce: Visualize -> Vertical Bar: Metrics: Derivative (Aggregation: Sum), Buckets: Date Histogram", @@ -847,13 +842,12 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__2__1-metric_col_0", 27.0), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp64Milli("timestamp") / 600000) AS "aggr__2__key_0", - count(*) AS "aggr__2__count", - sumOrNull(toHour("timestamp")) AS "metric__2__1-metric_col_0" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp64Milli("timestamp") / 600000) AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64(toUnixTimestamp64Milli(`timestamp`) / 600000) AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`,\n" + + " sumOrNull(toHour(`timestamp`)) AS `metric__2__1-metric_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY toInt64(toUnixTimestamp64Milli(`timestamp`) / 600000) AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [6] TestName: "Derivative to cumulative sum. 
Reproduce: Visualize -> Vertical Bar: Metrics: Derivative (Aggregation: Cumulative Sum (Aggregation: Count)), Buckets: Date Histogram", @@ -1226,13 +1220,12 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", uint64(2)), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp64Milli("timestamp") / 600000) AS "aggr__2__key_0", - count(*) AS "aggr__2__count" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp64Milli("timestamp") / 600000) AS - "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64(toUnixTimestamp64Milli(`timestamp`) / 600000) AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY toInt64(toUnixTimestamp64Milli(`timestamp`) / 600000) AS\n" + + " `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [7] TestName: "Simplest Serial Diff (count), lag=default (1). Reproduce: Visualize -> Vertical Bar: Metrics: Serial Diff (Aggregation: Count), Buckets: Histogram", @@ -1370,12 +1363,11 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", uint64(21)), }}, }, - ExpectedPancakeSQL: ` - SELECT floor("bytes"/200)*200 AS "aggr__2__key_0", - count(*) AS "aggr__2__count" - FROM __quesma_table_name - GROUP BY floor("bytes"/200)*200 AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT floor(`bytes`/200)*200 AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY floor(`bytes`/200)*200 AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [8] TestName: "Simplest Serial Diff (count), lag=2. 
Don't know how to reproduce in OpenSearch, but you can click out:" + @@ -1517,12 +1509,11 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", uint64(21)), }}, }, - ExpectedPancakeSQL: ` - SELECT floor("bytes"/200)*200 AS "aggr__2__key_0", - count(*) AS "aggr__2__count" - FROM __quesma_table_name - GROUP BY floor("bytes"/200)*200 AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT floor(`bytes`/200)*200 AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY floor(`bytes`/200)*200 AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [9] TestName: "Serial diff with other aggregation. Reproduce: Visualize -> Vertical Bar: Metrics: Serial Diff (Aggregation: Sum), Buckets: Date Histogram", @@ -1723,13 +1714,12 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__2__1-metric_col_0", 27.0), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp64Milli("timestamp") / 600000) AS "aggr__2__key_0", - count(*) AS "aggr__2__count", - sumOrNull(toHour("timestamp")) AS "metric__2__1-metric_col_0" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp64Milli("timestamp") / 600000) AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64(toUnixTimestamp64Milli(`timestamp`) / 600000) AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`,\n" + + " sumOrNull(toHour(`timestamp`)) AS `metric__2__1-metric_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY toInt64(toUnixTimestamp64Milli(`timestamp`) / 600000) AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [10] TestName: "Serial Diff to cumulative sum. 
Reproduce: Visualize -> Vertical Bar: Metrics: Serial Diff (Aggregation: Cumulative Sum (Aggregation: Count)), Buckets: Date Histogram", @@ -1949,14 +1939,13 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__count", uint64(2)), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 600000) AS "aggr__2__key_0", - count(*) AS "aggr__2__count" - FROM __quesma_table_name - GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 600000) AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 600000) AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 600000) AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, { // [11] TestName: "Simplest avg_bucket. 
Reproduce: Visualize -> Vertical Bar: Metrics: Average Bucket (Bucket: Date Histogram, Metric: Count)", @@ -2072,13 +2061,12 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__1-bucket__count", uint64(1)), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp64Milli("timestamp") / 600000) AS - "aggr__1-bucket__key_0", count(*) AS "aggr__1-bucket__count" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp64Milli("timestamp") / 600000) AS - "aggr__1-bucket__key_0" - ORDER BY "aggr__1-bucket__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64(toUnixTimestamp64Milli(`timestamp`) / 600000) AS\n" + + " `aggr__1-bucket__key_0`, count(*) AS `aggr__1-bucket__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY toInt64(toUnixTimestamp64Milli(`timestamp`) / 600000) AS\n" + + " `aggr__1-bucket__key_0`\n" + + "ORDER BY `aggr__1-bucket__key_0` ASC", }, { // [12] TestName: "avg_bucket. Reproduce: Visualize -> Vertical Bar: Metrics: Average Bucket (Bucket: Date Histogram, Metric: Max)", @@ -2213,14 +2201,13 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__1-bucket__1-metric_col_0", 9199.0), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp64Milli("timestamp") / 600000) AS - "aggr__1-bucket__key_0", count(*) AS "aggr__1-bucket__count", - maxOrNull("bytes") AS "metric__1-bucket__1-metric_col_0" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp64Milli("timestamp") / 600000) AS - "aggr__1-bucket__key_0" - ORDER BY "aggr__1-bucket__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64(toUnixTimestamp64Milli(`timestamp`) / 600000) AS\n" + + " `aggr__1-bucket__key_0`, count(*) AS `aggr__1-bucket__count`,\n" + + " maxOrNull(`bytes`) AS `metric__1-bucket__1-metric_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY toInt64(toUnixTimestamp64Milli(`timestamp`) / 600000) AS\n" + + " `aggr__1-bucket__key_0`\n" + + "ORDER BY 
`aggr__1-bucket__key_0` ASC", }, /* TODO need fix for date_range and subaggregations. Same one, as already merged ~1-2 weeks ago for range. It's WIP. { // [13] @@ -2673,25 +2660,24 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__2__1-bucket__count", uint64(3)), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__2__key_0", "aggr__2__count", "aggr__2__1-bucket__key_0", - "aggr__2__1-bucket__count" - FROM ( - SELECT "aggr__2__key_0", "aggr__2__count", "aggr__2__1-bucket__key_0", - "aggr__2__1-bucket__count", - dense_rank() OVER (ORDER BY "aggr__2__key_0" ASC) AS "aggr__2__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY - "aggr__2__1-bucket__key_0" ASC) AS "aggr__2__1-bucket__order_1_rank" - FROM ( - SELECT toInt64(toUnixTimestamp64Milli("timestamp") / 600000) AS - "aggr__2__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - "bytes" AS "aggr__2__1-bucket__key_0", - count(*) AS "aggr__2__1-bucket__count" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp64Milli("timestamp") / 600000) AS - "aggr__2__key_0", "bytes" AS "aggr__2__1-bucket__key_0")) - ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__1-bucket__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__2__key_0`, `aggr__2__count`, `aggr__2__1-bucket__key_0`,\n" + + " `aggr__2__1-bucket__count`\n" + + "FROM (\n" + + " SELECT `aggr__2__key_0`, `aggr__2__count`, `aggr__2__1-bucket__key_0`,\n" + + " `aggr__2__1-bucket__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__2__key_0` ASC) AS `aggr__2__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0` ORDER BY\n" + + " `aggr__2__1-bucket__key_0` ASC) AS `aggr__2__1-bucket__order_1_rank`\n" + + " FROM (\n" + + " SELECT toInt64(toUnixTimestamp64Milli(`timestamp`) / 600000) AS\n" + + " `aggr__2__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__count`,\n" + + " `bytes` AS `aggr__2__1-bucket__key_0`,\n" + + 
" count(*) AS `aggr__2__1-bucket__count`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY toInt64(toUnixTimestamp64Milli(`timestamp`) / 600000) AS\n" + + " `aggr__2__key_0`, `bytes` AS `aggr__2__1-bucket__key_0`))\n" + + "ORDER BY `aggr__2__order_1_rank` ASC, `aggr__2__1-bucket__order_1_rank` ASC", }, { // [15] TestName: "Simplest min_bucket. Reproduce: Visualize -> Vertical Bar: Metrics: Min Bucket (Bucket: Terms, Metric: Count)", @@ -2849,15 +2835,14 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__1-bucket__count", uint64(1)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__1-bucket__parent_count", - "clientip" AS "aggr__1-bucket__key_0", count(*) AS "aggr__1-bucket__count" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1715413213606) AND "timestamp"<= - fromUnixTimestamp64Milli(1715467213606)) - GROUP BY "clientip" AS "aggr__1-bucket__key_0" - ORDER BY "aggr__1-bucket__key_0" DESC - LIMIT 6`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__1-bucket__parent_count`,\n" + + " `clientip` AS `aggr__1-bucket__key_0`, count(*) AS `aggr__1-bucket__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1715413213606) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1715467213606))\n" + + "GROUP BY `clientip` AS `aggr__1-bucket__key_0`\n" + + "ORDER BY `aggr__1-bucket__key_0` DESC\n" + + "LIMIT 6", }, { // [16] TestName: "min_bucket. 
Reproduce: Visualize -> Vertical Bar: Metrics: Min Bucket (Bucket: Terms, Metric: Unique Count)", @@ -3029,14 +3014,13 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__1-bucket__1-metric_col_0", 1), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__1-bucket__parent_count", - "clientip" AS "aggr__1-bucket__key_0", count(*) AS "aggr__1-bucket__count", - uniq("geo.coordinates") AS "metric__1-bucket__1-metric_col_0" - FROM __quesma_table_name - GROUP BY "clientip" AS "aggr__1-bucket__key_0" - ORDER BY "aggr__1-bucket__key_0" DESC - LIMIT 6`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__1-bucket__parent_count`,\n" + + " `clientip` AS `aggr__1-bucket__key_0`, count(*) AS `aggr__1-bucket__count`,\n" + + " uniq(`geo.coordinates`) AS `metric__1-bucket__1-metric_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY `clientip` AS `aggr__1-bucket__key_0`\n" + + "ORDER BY `aggr__1-bucket__key_0` DESC\n" + + "LIMIT 6", }, { // [17] TestName: "complex min_bucket. 
Reproduce: Visualize -> Vertical Bar: Metrics: Min Bucket (Bucket: Terms, Metric: Sum), Buckets: Split Series: Histogram", @@ -3236,30 +3220,29 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__2__1-bucket__1-metric_col_0", 18.0), }}, }, - ExpectedPancakeSQL: ` - SELECT "aggr__2__key_0", "aggr__2__count", "aggr__2__1-bucket__parent_count", - "aggr__2__1-bucket__key_0", "aggr__2__1-bucket__count", - "metric__2__1-bucket__1-metric_col_0" - FROM ( - SELECT "aggr__2__key_0", "aggr__2__count", "aggr__2__1-bucket__parent_count", - "aggr__2__1-bucket__key_0", "aggr__2__1-bucket__count", - "metric__2__1-bucket__1-metric_col_0", - dense_rank() OVER (ORDER BY "aggr__2__key_0" ASC) AS "aggr__2__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY - "aggr__2__1-bucket__key_0" DESC) AS "aggr__2__1-bucket__order_1_rank" - FROM ( - SELECT floor("bytes"/200)*200 AS "aggr__2__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS - "aggr__2__1-bucket__parent_count", - "clientip" AS "aggr__2__1-bucket__key_0", - count(*) AS "aggr__2__1-bucket__count", - sumOrNull("bytes") AS "metric__2__1-bucket__1-metric_col_0" - FROM __quesma_table_name - GROUP BY floor("bytes"/200)*200 AS "aggr__2__key_0", - "clientip" AS "aggr__2__1-bucket__key_0")) - WHERE "aggr__2__1-bucket__order_1_rank"<=3 - ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__1-bucket__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__2__key_0`, `aggr__2__count`, `aggr__2__1-bucket__parent_count`,\n" + + " `aggr__2__1-bucket__key_0`, `aggr__2__1-bucket__count`,\n" + + " `metric__2__1-bucket__1-metric_col_0`\n" + + "FROM (\n" + + " SELECT `aggr__2__key_0`, `aggr__2__count`, `aggr__2__1-bucket__parent_count`,\n" + + " `aggr__2__1-bucket__key_0`, `aggr__2__1-bucket__count`,\n" + + " `metric__2__1-bucket__1-metric_col_0`,\n" + + " dense_rank() OVER (ORDER BY `aggr__2__key_0` 
ASC) AS `aggr__2__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__key_0` ORDER BY\n" + + " `aggr__2__1-bucket__key_0` DESC) AS `aggr__2__1-bucket__order_1_rank`\n" + + " FROM (\n" + + " SELECT floor(`bytes`/200)*200 AS `aggr__2__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS `aggr__2__count`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__2__key_0`) AS\n" + + " `aggr__2__1-bucket__parent_count`,\n" + + " `clientip` AS `aggr__2__1-bucket__key_0`,\n" + + " count(*) AS `aggr__2__1-bucket__count`,\n" + + " sumOrNull(`bytes`) AS `metric__2__1-bucket__1-metric_col_0`\n" + + " FROM `__quesma_table_name`\n" + + " GROUP BY floor(`bytes`/200)*200 AS `aggr__2__key_0`,\n" + + " `clientip` AS `aggr__2__1-bucket__key_0`))\n" + + "WHERE `aggr__2__1-bucket__order_1_rank`<=3\n" + + "ORDER BY `aggr__2__order_1_rank` ASC, `aggr__2__1-bucket__order_1_rank` ASC", }, { // [18] TestName: "Simplest max_bucket. Reproduce: Visualize -> Line: Metrics: Max Bucket (Bucket: Terms, Metric: Count)", @@ -3381,15 +3364,14 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__1-bucket__count", uint64(1923)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__1-bucket__parent_count", - "Cancelled" AS "aggr__1-bucket__key_0", count(*) AS "aggr__1-bucket__count" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1714255011264) AND "timestamp"<= - fromUnixTimestamp64Milli(1715551011264)) - GROUP BY "Cancelled" AS "aggr__1-bucket__key_0" - ORDER BY "aggr__1-bucket__key_0" DESC - LIMIT 6`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__1-bucket__parent_count`,\n" + + " `Cancelled` AS `aggr__1-bucket__key_0`, count(*) AS `aggr__1-bucket__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1714255011264) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1715551011264))\n" + + "GROUP BY `Cancelled` AS 
`aggr__1-bucket__key_0`\n" + + "ORDER BY `aggr__1-bucket__key_0` DESC\n" + + "LIMIT 6", }, { // [19] TestName: "Max/Sum bucket with some null buckets. Reproduce: Visualize -> Vertical Bar: Metrics: Max (Sum) Bucket (Aggregation: Date Histogram, Metric: Min)", @@ -3538,14 +3520,13 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__1-bucket__1-metric_col_0", nil), }}, }, - ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp64Milli("timestamp") / 600000) AS - "aggr__1-bucket__key_0", count(*) AS "aggr__1-bucket__count", - minOrNull("memory") AS "metric__1-bucket__1-metric_col_0" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp64Milli("timestamp") / 600000) AS - "aggr__1-bucket__key_0" - ORDER BY "aggr__1-bucket__key_0" ASC`, + ExpectedPancakeSQL: "SELECT toInt64(toUnixTimestamp64Milli(`timestamp`) / 600000) AS\n" + + " `aggr__1-bucket__key_0`, count(*) AS `aggr__1-bucket__count`,\n" + + " minOrNull(`memory`) AS `metric__1-bucket__1-metric_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY toInt64(toUnixTimestamp64Milli(`timestamp`) / 600000) AS\n" + + " `aggr__1-bucket__key_0`\n" + + "ORDER BY `aggr__1-bucket__key_0` ASC", }, { // [20] TestName: "Different pipeline aggrs with some null buckets. Reproduce: Visualize -> Vertical Bar: Metrics: Max/Sum Bucket/etc. 
(Aggregation: Histogram, Metric: Max)", @@ -3771,12 +3752,11 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__1-bucket__1-metric_col_0", float64(452)), }}, }, - ExpectedPancakeSQL: ` - SELECT "bytes" AS "aggr__1-bucket__key_0", count(*) AS "aggr__1-bucket__count", - maxOrNull("memory") AS "metric__1-bucket__1-metric_col_0" - FROM __quesma_table_name - GROUP BY "bytes" AS "aggr__1-bucket__key_0" - ORDER BY "aggr__1-bucket__key_0" ASC`, + ExpectedPancakeSQL: "SELECT `bytes` AS `aggr__1-bucket__key_0`, count(*) AS `aggr__1-bucket__count`,\n" + + " maxOrNull(`memory`) AS `metric__1-bucket__1-metric_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY `bytes` AS `aggr__1-bucket__key_0`\n" + + "ORDER BY `aggr__1-bucket__key_0` ASC", }, /* waits for probably a simple filters fix */ { // [21] @@ -3916,14 +3896,13 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("filter_1__metric__1-bucket__1-metric_col_0", 4968221.14887619), }}, }, - ExpectedPancakeSQL: ` - SELECT countIf("FlightDelayMin" > '-100') AS "filter_0__aggr__1-bucket__count", - sumOrNullIf("DistanceKilometers", "FlightDelayMin" > '-100') AS - "filter_0__metric__1-bucket__1-metric_col_0", - countIf(false) AS "filter_1__aggr__1-bucket__count", - sumOrNullIf("DistanceKilometers", false) AS - "filter_1__metric__1-bucket__1-metric_col_0" - FROM __quesma_table_name`, + ExpectedPancakeSQL: "SELECT countIf(`FlightDelayMin` > '-100') AS `filter_0__aggr__1-bucket__count`,\n" + + " sumOrNullIf(`DistanceKilometers`, `FlightDelayMin` > '-100') AS\n" + + " `filter_0__metric__1-bucket__1-metric_col_0`,\n" + + " countIf(false) AS `filter_1__aggr__1-bucket__count`,\n" + + " sumOrNullIf(`DistanceKilometers`, false) AS\n" + + " `filter_1__metric__1-bucket__1-metric_col_0`\n" + + "FROM `__quesma_table_name`", }, /* waits for probably a simple filters fix */ { // [22] TODO check this test with other pipeline aggregations @@ -4320,15 
+4299,14 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("aggr__1-bucket__count", uint64(298)), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__1-bucket__parent_count", - "extension" AS "aggr__1-bucket__key_0", count(*) AS "aggr__1-bucket__count" - FROM __quesma_table_name - WHERE ("timestamp">=fromUnixTimestamp64Milli(1714256186906) AND "timestamp"<= - fromUnixTimestamp64Milli(1715552186906)) - GROUP BY "extension" AS "aggr__1-bucket__key_0" - ORDER BY "aggr__1-bucket__key_0" DESC - LIMIT 6`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__1-bucket__parent_count`,\n" + + " `extension` AS `aggr__1-bucket__key_0`, count(*) AS `aggr__1-bucket__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`timestamp`>=fromUnixTimestamp64Milli(1714256186906) AND `timestamp`<= \n" + + " fromUnixTimestamp64Milli(1715552186906))\n" + + "GROUP BY `extension` AS `aggr__1-bucket__key_0`\n" + + "ORDER BY `aggr__1-bucket__key_0` DESC\n" + + "LIMIT 6", }, { // [24] TestName: "sum_bucket. 
Reproduce: Visualize -> Horizontal Bar: Metrics: Sum Bucket (Bucket: Significant Terms, Metric: Average)", @@ -4476,14 +4454,13 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__1-bucket__1-metric_col_0", 12786004614.736841), }}, }, - ExpectedPancakeSQL: ` - SELECT sum(count(*)) OVER () AS "aggr__1-bucket__parent_count", - "extension" AS "aggr__1-bucket__key_0", count(*) AS "aggr__1-bucket__count", - avgOrNull("machine.ram") AS "metric__1-bucket__1-metric_col_0" - FROM __quesma_table_name - GROUP BY "extension" AS "aggr__1-bucket__key_0" - ORDER BY "aggr__1-bucket__count" DESC, "aggr__1-bucket__key_0" ASC - LIMIT 6`, + ExpectedPancakeSQL: "SELECT sum(count(*)) OVER () AS `aggr__1-bucket__parent_count`,\n" + + " `extension` AS `aggr__1-bucket__key_0`, count(*) AS `aggr__1-bucket__count`,\n" + + " avgOrNull(`machine.ram`) AS `metric__1-bucket__1-metric_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY `extension` AS `aggr__1-bucket__key_0`\n" + + "ORDER BY `aggr__1-bucket__count` DESC, `aggr__1-bucket__key_0` ASC\n" + + "LIMIT 6", }, { // [25] TestName: "complex sum_bucket. 
Reproduce: Visualize -> Vertical Bar: Metrics: Sum Bucket (Bucket: Date Histogram, Metric: Average), Buckets: X-Asis: Histogram", @@ -4846,37 +4823,36 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ "timed_out": false, "took": 40 }`, - ExpectedPancakeSQL: ` - SELECT "aggr__2__count", "aggr__2__3__key_0", "aggr__2__3__count", - "aggr__2__3__1-bucket__key_0", "aggr__2__3__1-bucket__count", - "metric__2__3__1-bucket__1-metric_col_0" - FROM ( - SELECT "aggr__2__count", "aggr__2__3__key_0", "aggr__2__3__count", - "aggr__2__3__1-bucket__key_0", "aggr__2__3__1-bucket__count", - "metric__2__3__1-bucket__1-metric_col_0", - dense_rank() OVER (ORDER BY "aggr__2__3__key_0" ASC) AS - "aggr__2__3__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__3__key_0" ORDER BY - "aggr__2__3__1-bucket__key_0" ASC) AS "aggr__2__3__1-bucket__order_1_rank" - FROM ( - SELECT sum(countIf(("bytes">=0 AND "bytes"<1000))) OVER () AS - "aggr__2__count", floor("bytes"/200)*200 AS "aggr__2__3__key_0", - sum(countIf(("bytes">=0 AND "bytes"<1000))) OVER (PARTITION BY - "aggr__2__3__key_0") AS "aggr__2__3__count", - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS - "aggr__2__3__1-bucket__key_0", - countIf(("bytes">=0 AND "bytes"<1000)) AS "aggr__2__3__1-bucket__count", - avgOrNullIf("memory", ("bytes">=0 AND "bytes"<1000)) AS - "metric__2__3__1-bucket__1-metric_col_0" - FROM __quesma_table_name - WHERE ("bytes">=0 AND "bytes"<1000) - GROUP BY floor("bytes"/200)*200 AS "aggr__2__3__key_0", - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS - "aggr__2__3__1-bucket__key_0")) - ORDER BY "aggr__2__3__order_1_rank" ASC, - "aggr__2__3__1-bucket__order_1_rank" ASC`, + ExpectedPancakeSQL: "SELECT `aggr__2__count`, `aggr__2__3__key_0`, `aggr__2__3__count`,\n" + + " `aggr__2__3__1-bucket__key_0`, `aggr__2__3__1-bucket__count`,\n" + + " 
`metric__2__3__1-bucket__1-metric_col_0`\n" + + "FROM (\n" + + " SELECT `aggr__2__count`, `aggr__2__3__key_0`, `aggr__2__3__count`,\n" + + " `aggr__2__3__1-bucket__key_0`, `aggr__2__3__1-bucket__count`,\n" + + " `metric__2__3__1-bucket__1-metric_col_0`,\n" + + " dense_rank() OVER (ORDER BY `aggr__2__3__key_0` ASC) AS\n" + + " `aggr__2__3__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__3__key_0` ORDER BY\n" + + " `aggr__2__3__1-bucket__key_0` ASC) AS `aggr__2__3__1-bucket__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(countIf((`bytes`>=0 AND `bytes`<1000))) OVER () AS\n" + + " `aggr__2__count`, floor(`bytes`/200)*200 AS `aggr__2__3__key_0`,\n" + + " sum(countIf((`bytes`>=0 AND `bytes`<1000))) OVER (PARTITION BY\n" + + " `aggr__2__3__key_0`) AS `aggr__2__3__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 43200000) AS\n" + + " `aggr__2__3__1-bucket__key_0`,\n" + + " countIf((`bytes`>=0 AND `bytes`<1000)) AS `aggr__2__3__1-bucket__count`,\n" + + " avgOrNullIf(`memory`, (`bytes`>=0 AND `bytes`<1000)) AS\n" + + " `metric__2__3__1-bucket__1-metric_col_0`\n" + + " FROM `__quesma_table_name`\n" + + " WHERE (`bytes`>=0 AND `bytes`<1000)\n" + + " GROUP BY floor(`bytes`/200)*200 AS `aggr__2__3__key_0`,\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 43200000) AS\n" + + " `aggr__2__3__1-bucket__key_0`))\n" + + "ORDER BY `aggr__2__3__order_1_rank` ASC,\n" + + " `aggr__2__3__1-bucket__order_1_rank` ASC", ExpectedPancakeResults: []model.QueryResultRow{ {Cols: []model.QueryResultCol{ model.NewQueryResultCol("aggr__2__count", uint64(168)), @@ -4927,37 +4903,37 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", 27400), }}, }, - ExpectedAdditionalPancakeSQLs: []string{` - SELECT "aggr__2__count", 
"aggr__2__3__key_0", "aggr__2__3__count", - "aggr__2__3__1-bucket__key_0", "aggr__2__3__1-bucket__count", - "metric__2__3__1-bucket__1-metric_col_0" - FROM ( - SELECT "aggr__2__count", "aggr__2__3__key_0", "aggr__2__3__count", - "aggr__2__3__1-bucket__key_0", "aggr__2__3__1-bucket__count", - "metric__2__3__1-bucket__1-metric_col_0", - dense_rank() OVER (ORDER BY "aggr__2__3__key_0" ASC) AS - "aggr__2__3__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__2__3__key_0" ORDER BY - "aggr__2__3__1-bucket__key_0" ASC) AS "aggr__2__3__1-bucket__order_1_rank" - FROM ( - SELECT sum(countIf(("bytes">=1000 AND "bytes"<2000))) OVER () AS - "aggr__2__count", floor("bytes"/200)*200 AS "aggr__2__3__key_0", - sum(countIf(("bytes">=1000 AND "bytes"<2000))) OVER (PARTITION BY - "aggr__2__3__key_0") AS "aggr__2__3__count", - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS - "aggr__2__3__1-bucket__key_0", - countIf(("bytes">=1000 AND "bytes"<2000)) AS "aggr__2__3__1-bucket__count", - avgOrNullIf("memory", ("bytes">=1000 AND "bytes"<2000)) AS - "metric__2__3__1-bucket__1-metric_col_0" - FROM __quesma_table_name - WHERE ("bytes">=1000 AND "bytes"<2000) - GROUP BY floor("bytes"/200)*200 AS "aggr__2__3__key_0", - toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS - "aggr__2__3__1-bucket__key_0")) - ORDER BY "aggr__2__3__order_1_rank" ASC, - "aggr__2__3__1-bucket__order_1_rank" ASC`, + ExpectedAdditionalPancakeSQLs: []string{ + "SELECT `aggr__2__count`, `aggr__2__3__key_0`, `aggr__2__3__count`,\n" + + " `aggr__2__3__1-bucket__key_0`, `aggr__2__3__1-bucket__count`,\n" + + " `metric__2__3__1-bucket__1-metric_col_0`\n" + + "FROM (\n" + + " SELECT `aggr__2__count`, `aggr__2__3__key_0`, `aggr__2__3__count`,\n" + + " `aggr__2__3__1-bucket__key_0`, `aggr__2__3__1-bucket__count`,\n" + + " `metric__2__3__1-bucket__1-metric_col_0`,\n" + + " 
dense_rank() OVER (ORDER BY `aggr__2__3__key_0` ASC) AS\n" + + " `aggr__2__3__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__2__3__key_0` ORDER BY\n" + + " `aggr__2__3__1-bucket__key_0` ASC) AS `aggr__2__3__1-bucket__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(countIf((`bytes`>=1000 AND `bytes`<2000))) OVER () AS\n" + + " `aggr__2__count`, floor(`bytes`/200)*200 AS `aggr__2__3__key_0`,\n" + + " sum(countIf((`bytes`>=1000 AND `bytes`<2000))) OVER (PARTITION BY\n" + + " `aggr__2__3__key_0`) AS `aggr__2__3__count`,\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 43200000) AS\n" + + " `aggr__2__3__1-bucket__key_0`,\n" + + " countIf((`bytes`>=1000 AND `bytes`<2000)) AS `aggr__2__3__1-bucket__count`,\n" + + " avgOrNullIf(`memory`, (`bytes`>=1000 AND `bytes`<2000)) AS\n" + + " `metric__2__3__1-bucket__1-metric_col_0`\n" + + " FROM `__quesma_table_name`\n" + + " WHERE (`bytes`>=1000 AND `bytes`<2000)\n" + + " GROUP BY floor(`bytes`/200)*200 AS `aggr__2__3__key_0`,\n" + + " toInt64((toUnixTimestamp64Milli(`timestamp`)+timeZoneOffset(toTimezone(\n" + + " `timestamp`, 'Europe/Warsaw'))*1000) / 43200000) AS\n" + + " `aggr__2__3__1-bucket__key_0`))\n" + + "ORDER BY `aggr__2__3__order_1_rank` ASC,\n" + + " `aggr__2__3__1-bucket__order_1_rank` ASC", }, ExpectedAdditionalPancakeResults: [][]model.QueryResultRow{ { diff --git a/platform/testdata/opensearch_requests.go b/platform/testdata/opensearch_requests.go index fa0ae2d18..895b15de3 100644 --- a/platform/testdata/opensearch_requests.go +++ b/platform/testdata/opensearch_requests.go @@ -80,23 +80,24 @@ var OpensearchSearchTests = []SearchTestCase{ "track_total_hits": true }`, WantedSql: []string{ - `("__timestamp">=fromUnixTimestamp64Milli(1712236698149) AND "__timestamp"<=fromUnixTimestamp64Milli(1712237598149))`, + "(`__timestamp`>=fromUnixTimestamp64Milli(1712236698149) AND 
`__timestamp`<=fromUnixTimestamp64Milli(1712237598149))", }, WantedQueryType: model.ListAllFields, WantedQueries: []string{ - `SELECT "__bytes", "__timestamp", "message_____" - FROM __quesma_table_name - WHERE ("__timestamp">=fromUnixTimestamp64Milli(1712236698149) AND "__timestamp"<=fromUnixTimestamp64Milli(1712237598149)) - ORDER BY "__timestamp" DESC LIMIT 500`, - `SELECT sum(count(*)) OVER () AS "metric____quesma_total_count_col_0", - toInt64((toUnixTimestamp64Milli("__timestamp")+timeZoneOffset(toTimezone( - "__timestamp", 'Europe/Warsaw'))*1000) / 30000) AS "aggr__2__key_0", - count(*) AS "aggr__2__count" - FROM __quesma_table_name - WHERE ("__timestamp">=fromUnixTimestamp64Milli(1712236698149) AND "__timestamp"<=fromUnixTimestamp64Milli(1712237598149)) - GROUP BY toInt64((toUnixTimestamp64Milli("__timestamp")+timeZoneOffset( - toTimezone("__timestamp", 'Europe/Warsaw'))*1000) / 30000) AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + "SELECT `__bytes`, `__timestamp`, `message_____`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`__timestamp`>=fromUnixTimestamp64Milli(1712236698149) AND `__timestamp`<=fromUnixTimestamp64Milli(1712237598149))\n" + + "ORDER BY `__timestamp` DESC LIMIT 500", + + "SELECT sum(count(*)) OVER () AS `metric____quesma_total_count_col_0`,\n" + + " toInt64((toUnixTimestamp64Milli(`__timestamp`)+timeZoneOffset(toTimezone(\n" + + " `__timestamp`, 'Europe/Warsaw'))*1000) / 30000) AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`__timestamp`>=fromUnixTimestamp64Milli(1712236698149) AND `__timestamp`<=fromUnixTimestamp64Milli(1712237598149))\n" + + "GROUP BY toInt64((toUnixTimestamp64Milli(`__timestamp`)+timeZoneOffset(\n" + + " toTimezone(`__timestamp`, 'Europe/Warsaw'))*1000) / 30000) AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, }, { @@ -171,17 +172,17 @@ var OpensearchSearchTests = []SearchTestCase{ "track_total_hits": true }`, WantedSql: []string{ - 
`("__timestamp">=fromUnixTimestamp64Milli(1712236698149) AND "__timestamp"<=fromUnixTimestamp64Milli(1712237598149))`, + "(`__timestamp`>=fromUnixTimestamp64Milli(1712236698149) AND `__timestamp`<=fromUnixTimestamp64Milli(1712237598149))", }, WantedQueryType: model.Normal, WantedQueries: []string{ - `SELECT sum(count(*)) OVER () AS "metric____quesma_total_count_col_0", - toInt64(toUnixTimestamp64Milli("__timestamp") / 30000) AS "aggr__2__key_0", - count(*) AS "aggr__2__count" - FROM __quesma_table_name - WHERE ("__timestamp">=fromUnixTimestamp64Milli(1712236698149) AND "__timestamp"<=fromUnixTimestamp64Milli(1712237598149)) - GROUP BY toInt64(toUnixTimestamp64Milli("__timestamp") / 30000) AS "aggr__2__key_0" - ORDER BY "aggr__2__key_0" ASC`, + "SELECT sum(count(*)) OVER () AS `metric____quesma_total_count_col_0`,\n" + + " toInt64(toUnixTimestamp64Milli(`__timestamp`) / 30000) AS `aggr__2__key_0`,\n" + + " count(*) AS `aggr__2__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`__timestamp`>=fromUnixTimestamp64Milli(1712236698149) AND `__timestamp`<=fromUnixTimestamp64Milli(1712237598149))\n" + + "GROUP BY toInt64(toUnixTimestamp64Milli(`__timestamp`) / 30000) AS `aggr__2__key_0`\n" + + "ORDER BY `aggr__2__key_0` ASC", }, }, } diff --git a/platform/testdata/requests.go b/platform/testdata/requests.go index 74de55573..7a3ef002a 100644 --- a/platform/testdata/requests.go +++ b/platform/testdata/requests.go @@ -148,21 +148,21 @@ var TestsAsyncSearch = []AsyncSearchTestCase{ "no comment yet", model.HitsCountInfo{Type: model.Normal}, []string{ - `SELECT sum(count(*)) OVER () AS "aggr__sample__count", - sum(count("host_name")) OVER () AS "metric__sample__sample_count_col_0", - sum(count(*)) OVER () AS "aggr__sample__top_values__parent_count", - "host_name" AS "aggr__sample__top_values__key_0", - count(*) AS "aggr__sample__top_values__count" - FROM ( - SELECT "host_name" - FROM __quesma_table_name - WHERE (("@timestamp">=fromUnixTimestamp64Milli(1706009236820) AND 
"@timestamp" - <=fromUnixTimestamp64Milli(1706010136820)) AND "message" iLIKE '%user%') - LIMIT 20000) - GROUP BY "host_name" AS "aggr__sample__top_values__key_0" - ORDER BY "aggr__sample__top_values__count" DESC, - "aggr__sample__top_values__key_0" ASC - LIMIT 11`, + "SELECT sum(count(*)) OVER () AS `aggr__sample__count`,\n" + + " sum(count(`host_name`)) OVER () AS `metric__sample__sample_count_col_0`,\n" + + " sum(count(*)) OVER () AS `aggr__sample__top_values__parent_count`,\n" + + " `host_name` AS `aggr__sample__top_values__key_0`,\n" + + " count(*) AS `aggr__sample__top_values__count`\n" + + "FROM (\n" + + " SELECT `host_name`\n" + + " FROM `__quesma_table_name`\n" + + " WHERE ((`@timestamp`>=fromUnixTimestamp64Milli(1706009236820) AND `@timestamp\n" + + " `<=fromUnixTimestamp64Milli(1706010136820)) AND `message` iLIKE '%user%')\n" + + " LIMIT 20000)\n" + + "GROUP BY `host_name` AS `aggr__sample__top_values__key_0`\n" + + "ORDER BY `aggr__sample__top_values__count` DESC,\n" + + " `aggr__sample__top_values__key_0` ASC\n" + + "LIMIT 11", }, true, }, @@ -304,16 +304,16 @@ var TestsAsyncSearch = []AsyncSearchTestCase{ `, "there should be 97 results, I truncated most of them", model.HitsCountInfo{Type: model.ListByField, RequestedFields: []string{"message"}, Size: 100}, []string{ - `SELECT "message" - FROM __quesma_table_name - WHERE ((("@timestamp">=fromUnixTimestamp64Milli(1706020999481) AND "@timestamp"<=fromUnixTimestamp64Milli(1706021899481)) - AND "message" iLIKE '%user%') AND "message" IS NOT NULL) - ORDER BY "@timestamp" DESC - LIMIT 100`, - `SELECT count(*) AS "column_0" - FROM __quesma_table_name - WHERE ((("@timestamp">=fromUnixTimestamp64Milli(1706020999481) AND "@timestamp"<=fromUnixTimestamp64Milli(1706021899481)) - AND "message" iLIKE '%user%') AND "message" IS NOT NULL)`, + "SELECT `message`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (((`@timestamp`>=fromUnixTimestamp64Milli(1706020999481) AND 
`@timestamp`<=fromUnixTimestamp64Milli(1706021899481)) \n" + + " AND `message` iLIKE '%user%') AND `message` IS NOT NULL)\n" + + "ORDER BY `@timestamp` DESC\n" + + "LIMIT 100", + "SELECT count(*) AS `column_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (((`@timestamp`>=fromUnixTimestamp64Milli(1706020999481) AND `@timestamp`<=fromUnixTimestamp64Milli(1706021899481)) \n" + + " AND `message` iLIKE '%user%') AND `message` IS NOT NULL)", }, false, }, @@ -554,12 +554,12 @@ var TestsAsyncSearch = []AsyncSearchTestCase{ }`, "Truncated most results. TODO Check what's at the end of response, probably count?", model.HitsCountInfo{Type: model.ListAllFields, RequestedFields: []string{"*"}, Size: 500}, - []string{` - SELECT "@timestamp", "event_dataset", "host_name", "message", "properties_isreg" - FROM __quesma_table_name - WHERE ("message" iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1706020999481) AND "@timestamp"<=fromUnixTimestamp64Milli(1706021899481))) - ORDER BY "@timestamp" DESC - LIMIT 500`, + []string{ + "SELECT `@timestamp`, `event_dataset`, `host_name`, `message`, `properties_isreg`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`message` iLIKE '%user%' AND (`@timestamp`>=fromUnixTimestamp64Milli(1706020999481) AND `@timestamp`<=fromUnixTimestamp64Milli(1706021899481)))\n" + + "ORDER BY `@timestamp` DESC\n" + + "LIMIT 500", }, false, }, @@ -693,17 +693,18 @@ var TestsAsyncSearch = []AsyncSearchTestCase{ "no comment yet", model.HitsCountInfo{Type: model.ListByField, RequestedFields: []string{"@timestamp"}, Size: 100}, []string{ - `SELECT sum(count(*)) OVER () AS "metric____quesma_total_count_col_0", - toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS "aggr__0__key_0", - count(*) AS "aggr__0__count" - FROM __quesma_table_name - WHERE ("message" iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1706020999481) AND "@timestamp"<=fromUnixTimestamp64Milli(1706021899481))) - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 
30000) AS "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, - `SELECT "@timestamp" - FROM __quesma_table_name - WHERE ("message" iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1706020999481) AND "@timestamp"<=fromUnixTimestamp64Milli(1706021899481))) - LIMIT 100`, + "SELECT sum(count(*)) OVER () AS `metric____quesma_total_count_col_0`,\n" + + " toInt64(toUnixTimestamp64Milli(`@timestamp`) / 30000) AS `aggr__0__key_0`,\n" + + " count(*) AS `aggr__0__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`message` iLIKE '%user%' AND (`@timestamp`>=fromUnixTimestamp64Milli(1706020999481) AND `@timestamp`<=fromUnixTimestamp64Milli(1706021899481)))\n" + + "GROUP BY toInt64(toUnixTimestamp64Milli(`@timestamp`) / 30000) AS `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__key_0` ASC", + + "SELECT `@timestamp`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`message` iLIKE '%user%' AND (`@timestamp`>=fromUnixTimestamp64Milli(1706020999481) AND `@timestamp`<=fromUnixTimestamp64Milli(1706021899481)))\n" + + "LIMIT 100", }, true, }, @@ -743,31 +744,31 @@ var TestsAsyncSearch = []AsyncSearchTestCase{ "no comment yet", model.HitsCountInfo{Type: model.Normal}, []string{ - `SELECT "aggr__stats__parent_count", "aggr__stats__key_0", "aggr__stats__count", - "aggr__stats__series__key_0", "aggr__stats__series__count" - FROM ( - SELECT "aggr__stats__parent_count", "aggr__stats__key_0", - "aggr__stats__count", "aggr__stats__series__key_0", - "aggr__stats__series__count", - dense_rank() OVER (ORDER BY "aggr__stats__count" DESC, "aggr__stats__key_0" - ASC) AS "aggr__stats__order_1_rank", - dense_rank() OVER (PARTITION BY "aggr__stats__key_0" ORDER BY - "aggr__stats__series__key_0" ASC) AS "aggr__stats__series__order_1_rank" - FROM ( - SELECT sum(count(*)) OVER () AS "aggr__stats__parent_count", - COALESCE("event_dataset", 'unknown') AS "aggr__stats__key_0", - sum(count(*)) OVER (PARTITION BY "aggr__stats__key_0") AS - "aggr__stats__count", - 
toInt64(toUnixTimestamp64Milli("@timestamp") / 60000) AS - "aggr__stats__series__key_0", count(*) AS "aggr__stats__series__count" - FROM __quesma_table_name - WHERE ("@timestamp">fromUnixTimestamp64Milli(1706194439033) AND "@timestamp"<=fromUnixTimestamp64Milli(1706195339033)) - GROUP BY COALESCE("event_dataset", 'unknown') AS "aggr__stats__key_0", - toInt64(toUnixTimestamp64Milli("@timestamp") / 60000) AS - "aggr__stats__series__key_0")) - WHERE "aggr__stats__order_1_rank"<=4 - ORDER BY "aggr__stats__order_1_rank" ASC, - "aggr__stats__series__order_1_rank" ASC`, + "SELECT `aggr__stats__parent_count`, `aggr__stats__key_0`, `aggr__stats__count`,\n" + + " `aggr__stats__series__key_0`, `aggr__stats__series__count`\n" + + "FROM (\n" + + " SELECT `aggr__stats__parent_count`, `aggr__stats__key_0`,\n" + + " `aggr__stats__count`, `aggr__stats__series__key_0`,\n" + + " `aggr__stats__series__count`,\n" + + " dense_rank() OVER (ORDER BY `aggr__stats__count` DESC, `aggr__stats__key_0`\n" + + " ASC) AS `aggr__stats__order_1_rank`,\n" + + " dense_rank() OVER (PARTITION BY `aggr__stats__key_0` ORDER BY\n" + + " `aggr__stats__series__key_0` ASC) AS `aggr__stats__series__order_1_rank`\n" + + " FROM (\n" + + " SELECT sum(count(*)) OVER () AS `aggr__stats__parent_count`,\n" + + " COALESCE(`event_dataset`, 'unknown') AS `aggr__stats__key_0`,\n" + + " sum(count(*)) OVER (PARTITION BY `aggr__stats__key_0`) AS\n" + + " `aggr__stats__count`,\n" + + " toInt64(toUnixTimestamp64Milli(`@timestamp`) / 60000) AS\n" + + " `aggr__stats__series__key_0`, count(*) AS `aggr__stats__series__count`\n" + + " FROM `__quesma_table_name`\n" + + " WHERE (`@timestamp`>fromUnixTimestamp64Milli(1706194439033) AND `@timestamp`<=fromUnixTimestamp64Milli(1706195339033))\n" + + " GROUP BY COALESCE(`event_dataset`, 'unknown') AS `aggr__stats__key_0`,\n" + + " toInt64(toUnixTimestamp64Milli(`@timestamp`) / 60000) AS\n" + + " `aggr__stats__series__key_0`))\n" + + "WHERE `aggr__stats__order_1_rank`<=4\n" + + "ORDER BY 
`aggr__stats__order_1_rank` ASC,\n" + + " `aggr__stats__series__order_1_rank` ASC", }, true, }, @@ -851,12 +852,12 @@ var TestsAsyncSearch = []AsyncSearchTestCase{ "no comment yet", model.HitsCountInfo{Type: model.Normal}, []string{ - `SELECT minOrNull("@timestamp") AS "metric__earliest_timestamp_col_0", - maxOrNull("@timestamp") AS "metric__latest_timestamp_col_0", - count(*) AS "metric____quesma_total_count_col_0" - FROM __quesma_table_name - WHERE (("message" iLIKE '%posei%' AND "message" ILIKE '%User logged out%') AND - "host_name" ILIKE '%poseidon%')`, + "SELECT minOrNull(`@timestamp`) AS `metric__earliest_timestamp_col_0`,\n" + + " maxOrNull(`@timestamp`) AS `metric__latest_timestamp_col_0`,\n" + + " count(*) AS `metric____quesma_total_count_col_0`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE ((`message` iLIKE '%posei%' AND `message` ILIKE '%User logged out%') AND\n" + + " `host_name` ILIKE '%poseidon%')", }, true, }, @@ -873,9 +874,9 @@ var TestsAsyncSearch = []AsyncSearchTestCase{ "no comment yet", model.HitsCountInfo{Type: model.ListAllFields, RequestedFields: []string{"*"}, Size: 50}, []string{ - `SELECT "@timestamp", "event_dataset", "host_name", "message", "properties_isreg" - FROM __quesma_table_name - LIMIT 50`, + "SELECT `@timestamp`, `event_dataset`, `host_name`, `message`, `properties_isreg`\n" + + "FROM `__quesma_table_name`\n" + + "LIMIT 50", }, false, }, @@ -938,14 +939,8 @@ var TestsAsyncSearch = []AsyncSearchTestCase{ ``, "happens e.g. 
in Explorer > Field Statistics view", model.HitsCountInfo{Type: model.ListByField, RequestedFields: []string{"properties::isreg"}, Size: 100}, - []string{` - SELECT "properties_isreg" - FROM __quesma_table_name - WHERE ((("@timestamp">=fromUnixTimestamp64Milli(1710171234276) AND "@timestamp" - <=fromUnixTimestamp64Milli(1710172134276)) AND ("@timestamp">= - fromUnixTimestamp64Milli(1710171234276) AND "@timestamp"<= - fromUnixTimestamp64Milli(1710172134276))) AND "properties_isreg" IS NOT NULL) - LIMIT 100`, + []string{ + "SELECT `properties_isreg`\nFROM `__quesma_table_name`\nWHERE (((`@timestamp`>=fromUnixTimestamp64Milli(1710171234276) AND `@timestamp`\n  <=fromUnixTimestamp64Milli(1710172134276)) AND (`@timestamp`>=\n  fromUnixTimestamp64Milli(1710171234276) AND `@timestamp`<=\n  fromUnixTimestamp64Milli(1710172134276))) AND `properties_isreg` IS NOT NULL)\nLIMIT 100", }, false, }, @@ -964,7 +959,7 @@ var TestsSearch = []SearchTestCase{ []string{""}, model.ListAllFields, []string{ - `SELECT "message" FROM ` + TableName + ` LIMIT 10`, + "SELECT `message` FROM `__quesma_table_name` LIMIT 10", }, []string{}, }, @@ -984,11 +979,11 @@ var TestsSearch = []SearchTestCase{ }, "track_total_hits": true }`, - []string{`"type"='task'`}, + []string{"`type`='task'"}, model.ListAllFields, []string{ - `SELECT "message" FROM ` + TableName + ` WHERE "type"='task' LIMIT 10`, - `SELECT count(*) AS "column_0" FROM ` + TableName, + "SELECT `message` FROM " + TableName + " WHERE `type`='task' LIMIT 10", + "SELECT count(*) AS `column_0` FROM " + TableName, }, []string{}, }, @@ -1014,11 +1009,11 @@ var TestsSearch = []SearchTestCase{ }, "track_total_hits": true }`, - []string{`("type"='task' AND "task.enabled" IN tuple(true, 54, 'abc', 'abc\'s'))`}, + []string{`(` + "`type`" + `='task' AND ` + "`task.enabled`" + ` IN tuple(true, 54, 'abc', 'abc\'s'))`}, model.ListAllFields, []string{ - `SELECT "message" FROM ` + TableName + ` WHERE ("type"='task' AND "task.enabled" IN tuple(true, 54, 
'abc', 'abc\\'s')) LIMIT 10`, - `SELECT count(*) AS "column_0" FROM ` + TableName, + "SELECT `message` FROM " + TableName + " WHERE (`type`='task' AND `task.enabled` IN tuple(true, 54, 'abc', 'abc\\'s')) LIMIT 10", + "SELECT count(*) AS `column_0` FROM " + TableName, }, []string{}, }, @@ -1053,14 +1048,14 @@ var TestsSearch = []SearchTestCase{ "track_total_hits": true }`, []string{ - `(` + fullTextFieldName + ` iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1705487298815) AND "@timestamp"<=fromUnixTimestamp64Milli(1705488198815)))`, + `(` + fullTextFieldName + ` iLIKE '%user%' AND (` + "`@timestamp`" + `>=fromUnixTimestamp64Milli(1705487298815) AND ` + "`@timestamp`" + `<=fromUnixTimestamp64Milli(1705488198815)))`, }, model.ListAllFields, []string{ - `SELECT "message" FROM ` + TableName + ` WHERE ("message" iLIKE '%user%' ` + - `AND ("@timestamp">=fromUnixTimestamp64Milli(1705487298815) AND "@timestamp"<=fromUnixTimestamp64Milli(1705488198815))) ` + - `LIMIT 10`, - `SELECT count(*) AS "column_0" FROM ` + TableName, + "SELECT `message` FROM " + TableName + " WHERE (`message` iLIKE '%user%' " + + "AND (`@timestamp`>=fromUnixTimestamp64Milli(1705487298815) AND `@timestamp`<=fromUnixTimestamp64Milli(1705488198815))) " + + "LIMIT 10", + "SELECT count(*) AS `column_0` FROM " + TableName, }, []string{}, }, @@ -1092,16 +1087,16 @@ var TestsSearch = []SearchTestCase{ "track_total_hits": true }`, []string{ - `((("user.id"='kimchy' AND "tags"='production') AND ("tags"='env1' OR "tags"='deployed')) AND NOT (("age">=10 AND "age"<=20)))`, + `(((` + "`user.id`" + `='kimchy' AND ` + "`tags`" + `='production') AND (` + "`tags`" + `='env1' OR ` + "`tags`" + `='deployed')) AND NOT ((` + "`age`" + `>=10 AND ` + "`age`" + `<=20)))`, }, model.ListAllFields, []string{ - `SELECT "message" FROM ` + TableName + ` WHERE ((("user.id"='kimchy' AND "tags"='production') ` + - `AND ("tags"='env1' OR "tags"='deployed')) AND NOT (("age".=.0 AND "age".=.0))) ` + - `LIMIT 10`, - `SELECT 
count(*) AS "column_0" FROM ` + TableName + ` ` + - `WHERE ((("user.id"='kimchy' AND "tags"='production') ` + - `AND ("tags"='env1' OR "tags"='deployed')) AND NOT (("age".=.0 AND "age".=.0)))`, + "SELECT `message` FROM " + TableName + " WHERE (((`user.id`='kimchy' AND `tags`='production') " + + "AND (`tags`='env1' OR `tags`='deployed')) AND NOT ((`age`=.0 AND `age`=.0))) " + + "LIMIT 10", + "SELECT count(*) AS `column_0` FROM " + TableName + " " + + "WHERE (((`user.id`='kimchy' AND `tags`='production') " + + "AND (`tags`='env1' OR `tags`='deployed')) AND NOT ((`age`=.0 AND `age`=.0)))", }, []string{}, }, @@ -1131,9 +1126,11 @@ var TestsSearch = []SearchTestCase{ }, "track_total_hits": false }`, - []string{`"host_name" __quesma_match '%prometheus%'`}, + []string{"`host_name` __quesma_match '%prometheus%'"}, model.ListAllFields, - []string{`SELECT "message" FROM ` + TableName + ` WHERE "host_name"='prometheus' LIMIT 10`}, + []string{ + "SELECT `message` FROM `" + TableName + "` WHERE `host_name`='prometheus' LIMIT 10", + }, []string{}, }, { // [6] @@ -1148,12 +1145,12 @@ var TestsSearch = []SearchTestCase{ "size": 100, "track_total_hits": false }`, - []string{`((("message" __quesma_match '%this%' OR "message" __quesma_match '%is%') OR "message" __quesma_match '%a%') OR "message" __quesma_match '%test%')`}, + []string{"(((`message` __quesma_match '%this%' OR `message` __quesma_match '%is%') OR `message` __quesma_match '%a%') OR `message` __quesma_match '%test%')"}, model.ListAllFields, []string{ - `SELECT "message" FROM ` + TableName + ` WHERE ((("message" ILIKE '%this%' OR "message" ILIKE '%is%') ` + - `OR "message" ILIKE '%a%') OR "message" ILIKE '%test%') ` + - `LIMIT 100`, + "SELECT `message` FROM " + TableName + " WHERE (((`message` ILIKE '%this%' OR `message` ILIKE '%is%') " + + "OR `message` ILIKE '%a%') OR `message` ILIKE '%test%') " + + "LIMIT 100", }, []string{}, }, @@ -1174,9 +1171,11 @@ var TestsSearch = []SearchTestCase{ }, "track_total_hits": false }`, - 
[]string{`"status"='pending'`}, + []string{"`status`='pending'"}, model.ListAllFields, - []string{`SELECT "message" FROM ` + TableName + ` WHERE "status"='pending'`}, + []string{ + "SELECT `message` FROM `" + TableName + "` WHERE `status`='pending'", + }, []string{}, }, { // [8] @@ -1222,12 +1221,12 @@ var TestsSearch = []SearchTestCase{ "track_total_hits": false }`, []string{ - `("type"='upgrade-assistant-reindex-operation' AND NOT (("namespace" IS NOT NULL OR "namespaces" IS NOT NULL)))`}, + "(`type`='upgrade-assistant-reindex-operation' AND NOT ((`namespace` IS NOT NULL OR `namespaces` IS NOT NULL)))"}, model.ListAllFields, []string{ - `SELECT "message" ` + - `FROM ` + TableName + ` ` + - `WHERE ("type"='upgrade-assistant-reindex-operation' AND NOT (("namespace" IS NOT NULL OR "namespaces" IS NOT NULL)))`, + "SELECT `message` " + + "FROM " + TableName + " " + + "WHERE (`type`='upgrade-assistant-reindex-operation' AND NOT ((`namespace` IS NOT NULL OR `namespaces` IS NOT NULL)))", }, []string{}, }, @@ -1252,9 +1251,11 @@ var TestsSearch = []SearchTestCase{ }, "track_total_hits": false }`, - []string{`"exception-list-agnostic.list_id" __quesma_match 'endpoint\_event\_filters'`}, + []string{"`exception-list-agnostic.list_id` __quesma_match 'endpoint\\_event\\_filters'"}, model.ListAllFields, - []string{`SELECT "message" FROM ` + TableName + ` WHERE "exception-list-agnostic.list_id"='endpoint\\_event\\_filters'`}, + []string{ + "SELECT `message` FROM `" + TableName + "` WHERE `exception-list-agnostic.list_id`='endpoint\\_event\\_filters'", + }, []string{}, }, { // [10] @@ -1281,7 +1282,9 @@ var TestsSearch = []SearchTestCase{ }`, []string{fullTextFieldName + ` __quesma_match 'ingest-agent-policies'`}, model.ListAllFields, - []string{`SELECT "message" FROM ` + TableName + ` WHERE ` + fullTextFieldName + ` ILIKE 'ingest-agent-policies'`}, + []string{ + "SELECT `message` FROM `" + TableName + "` WHERE `" + fullTextFieldName + "` ILIKE 'ingest-agent-policies'", + }, 
[]string{}, }, { // [11] @@ -1303,9 +1306,11 @@ var TestsSearch = []SearchTestCase{ }, "track_total_hits": false }`, - []string{`"task.taskType" iLIKE 'alerting:%'`}, + []string{"`task.taskType` iLIKE 'alerting:%'"}, model.ListAllFields, - []string{`SELECT "message" FROM ` + TableName + ` WHERE "task.taskType" iLIKE 'alerting:%'`}, + []string{ + "SELECT `message` FROM `" + TableName + "` WHERE `task.taskType` iLIKE 'alerting:%'", + }, []string{}, }, { // [12] @@ -1327,9 +1332,11 @@ var TestsSearch = []SearchTestCase{ }, "track_total_hits": false }`, - []string{`"alert.actions.actionRef" iLIKE 'preconfigured:%'`}, + []string{"`alert.actions.actionRef` iLIKE 'preconfigured:%'"}, model.ListAllFields, - []string{`SELECT "message" FROM ` + TableName + ` WHERE "alert.actions.actionRef" iLIKE 'preconfigured:%'`}, + []string{ + "SELECT `message` FROM `" + TableName + "` WHERE `alert.actions.actionRef` iLIKE 'preconfigured:%'", + }, []string{}, }, { // [13] @@ -1342,9 +1349,11 @@ var TestsSearch = []SearchTestCase{ "track_total_hits": false, "size": 10 }`, - []string{`"user" iLIKE 'ki%'`}, + []string{"`user` iLIKE 'ki%'"}, model.ListAllFields, - []string{`SELECT "message" FROM ` + TableName + ` WHERE "user" iLIKE 'ki%'`}, + []string{ + "SELECT `message` FROM `" + TableName + "` WHERE `user` iLIKE 'ki%'", + }, []string{}, }, { // [14] @@ -1357,9 +1366,11 @@ var TestsSearch = []SearchTestCase{ "track_total_hits": false, "size": 10 }`, - []string{`"user" iLIKE 'ki\%\_\\ \\\%%'`}, + []string{"`user` iLIKE 'ki\\%\\_\\\\ \\\\\\%%'"}, model.ListAllFields, - []string{`SELECT "message" FROM ` + TableName + ` WHERE "user" iLIKE 'ki\\%\\_\\\\ \\\\\\%%'`}, + []string{ + "SELECT `message` FROM `" + TableName + "` WHERE `user` iLIKE 'ki\\%\\_\\\\ \\\\\\%%'", + }, []string{}, }, { // [15] @@ -1377,9 +1388,11 @@ var TestsSearch = []SearchTestCase{ "track_total_hits": false, "size": 1 }`, - []string{`"message" __quesma_match '% logged'`}, + []string{"`message` __quesma_match '% logged'"}, 
model.ListAllFields, - []string{`SELECT "message" FROM ` + TableName + ` WHERE "message" ILIKE '% logged'`}, + []string{ + "SELECT `message` FROM `" + TableName + "` WHERE `message` ILIKE '% logged'", + }, []string{}, }, { // [16] @@ -1399,8 +1412,8 @@ var TestsSearch = []SearchTestCase{ []string{""}, model.ListAllFields, []string{ - `SELECT count(*) AS "column_0" FROM ` + TableName, - `SELECT "message" FROM ` + TableName, + "SELECT count(*) AS `column_0` FROM `" + TableName + "`", + "SELECT `message` FROM `" + TableName + "`", }, []string{}, }, @@ -1414,9 +1427,11 @@ var TestsSearch = []SearchTestCase{ }, "track_total_hits": false }`, - []string{`"message" __quesma_match '%this is a test%'`}, + []string{"`message` __quesma_match '%this is a test%'"}, model.ListAllFields, - []string{`SELECT "message" FROM ` + TableName + ` WHERE "message" ILIKE '%this is a test%'`}, + []string{ + "SELECT `message` FROM `" + TableName + "` WHERE `message` ILIKE '%this is a test%'", + }, []string{}, }, { // [18] @@ -1432,9 +1447,11 @@ var TestsSearch = []SearchTestCase{ }, "track_total_hits": false }`, - []string{`"message" __quesma_match '%this is a test%'`}, + []string{"`message` __quesma_match '%this is a test%'"}, model.ListAllFields, - []string{`SELECT "message" FROM ` + TableName + ` WHERE "message" ILIKE '%this is a test%'`}, + []string{ + "SELECT `message` FROM `" + TableName + "` WHERE `message` ILIKE '%this is a test%'", + }, []string{}, }, { // [19] @@ -1465,9 +1482,11 @@ var TestsSearch = []SearchTestCase{ }, "track_total_hits": false }`, - []string{`"references.type"='tag'`}, + []string{"`references.type`='tag'"}, model.ListAllFields, - []string{`SELECT "message" FROM ` + TableName + ` WHERE "references.type"='tag'`}, + []string{ + "SELECT `message` FROM `" + TableName + "` WHERE `references.type`='tag'", + }, []string{}, }, { // [20] @@ -1530,24 +1549,23 @@ var TestsSearch = []SearchTestCase{ } `, []string{ - `(` + fullTextFieldName + ` iLIKE '%user%' AND 
("@timestamp">=fromUnixTimestamp64Milli(1705915570299) AND "@timestamp"<=fromUnixTimestamp64Milli(1705916470299)))`, - `((` + fullTextFieldName + ` iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1705915570299) AND "@timestamp"<=fromUnixTimestamp64Milli(1705916470299))) ` + - `AND "stream.namespace" IS NOT NULL)`, + "(" + fullTextFieldName + " iLIKE '%user%' AND (`@timestamp`>=fromUnixTimestamp64Milli(1705915570299) AND `@timestamp`<=fromUnixTimestamp64Milli(1705916470299)))", + "((" + fullTextFieldName + " iLIKE '%user%' AND (`@timestamp`>=fromUnixTimestamp64Milli(1705915570299) AND `@timestamp`<=fromUnixTimestamp64Milli(1705916470299))) AND `stream.namespace` IS NOT NULL)", }, model.Normal, []string{}, []string{ - `SELECT uniqMerge(uniqState("stream_namespace")) OVER () AS - "metric__unique_terms_col_0", - sum(count(*)) OVER () AS "metric____quesma_total_count_col_0", - sum(count(*)) OVER () AS "aggr__suggestions__parent_count", - "stream_namespace" AS "aggr__suggestions__key_0", - count(*) AS "aggr__suggestions__count" - FROM __quesma_table_name - WHERE ("message" iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1705915570299) AND "@timestamp"<=fromUnixTimestamp64Milli(1705916470299))) - GROUP BY "stream_namespace" AS "aggr__suggestions__key_0" - ORDER BY "aggr__suggestions__count" DESC, "aggr__suggestions__key_0" ASC - LIMIT 11`, + "SELECT uniqMerge(uniqState(`stream_namespace`)) OVER () AS" + + " `metric__unique_terms_col_0`," + + " sum(count(*)) OVER () AS `metric____quesma_total_count_col_0`," + + " sum(count(*)) OVER () AS `aggr__suggestions__parent_count`," + + " `stream_namespace` AS `aggr__suggestions__key_0`," + + " count(*) AS `aggr__suggestions__count`" + + "FROM __quesma_table_name" + + "WHERE (`message` iLIKE '%user%' AND (`@timestamp`>=fromUnixTimestamp64Milli(1705915570299) AND `@timestamp`<=fromUnixTimestamp64Milli(1705916470299)))" + + "GROUP BY `stream_namespace` AS `aggr__suggestions__key_0`" + + "ORDER BY 
`aggr__suggestions__count` DESC, `aggr__suggestions__key_0` ASC" + + "LIMIT 11", }, }, { // [21] @@ -1617,22 +1635,22 @@ var TestsSearch = []SearchTestCase{ } `, []string{ - `(("service.name"='admin' AND ("@timestamp">=fromUnixTimestamp64Milli(1705934075873) AND "@timestamp"<=fromUnixTimestamp64Milli(1705934975873))) ` + - `AND "namespace" IS NOT NULL)`, - `("service.name"='admin' AND ("@timestamp">=fromUnixTimestamp64Milli(1705934075873) AND "@timestamp"<=fromUnixTimestamp64Milli(1705934975873)))`, + "((`service.name`='admin' AND (`@timestamp`>=fromUnixTimestamp64Milli(1705934075873) AND `@timestamp`<=fromUnixTimestamp64Milli(1705934975873))) " + + "AND `namespace` IS NOT NULL)", + "(`service.name`='admin' AND (`@timestamp`>=fromUnixTimestamp64Milli(1705934075873) AND `@timestamp`<=fromUnixTimestamp64Milli(1705934975873)))", }, model.Normal, []string{}, []string{ - `SELECT uniqMerge(uniqState("namespace")) OVER () AS "metric__unique_terms_col_0" - , sum(count(*)) OVER () AS "aggr__suggestions__parent_count", - "namespace" AS "aggr__suggestions__key_0", - count(*) AS "aggr__suggestions__count" - FROM __quesma_table_name - WHERE ("service_name"='admin' AND ("@timestamp">=fromUnixTimestamp64Milli(1705934075873) AND "@timestamp"<=fromUnixTimestamp64Milli(1705934975873))) - GROUP BY "namespace" AS "aggr__suggestions__key_0" - ORDER BY "aggr__suggestions__count" DESC, "aggr__suggestions__key_0" ASC - LIMIT 11`, + "SELECT uniqMerge(uniqState(`namespace`)) OVER () AS `metric__unique_terms_col_0`" + + " , sum(count(*)) OVER () AS `aggr__suggestions__parent_count`," + + " `namespace` AS `aggr__suggestions__key_0`," + + " count(*) AS `aggr__suggestions__count`" + + "FROM __quesma_table_name" + + "WHERE (`service_name`='admin' AND (`@timestamp`>=fromUnixTimestamp64Milli(1705934075873) AND `@timestamp`<=fromUnixTimestamp64Milli(1705934975873)))" + + "GROUP BY `namespace` AS `aggr__suggestions__key_0`" + + "ORDER BY `aggr__suggestions__count` DESC, `aggr__suggestions__key_0` 
ASC" + + "LIMIT 11", }, }, { // [22] @@ -1696,27 +1714,27 @@ var TestsSearch = []SearchTestCase{ "track_total_hits": true }`, []string{ - `(("message" __quesma_match '%User logged out%' AND "host.name" __quesma_match '%poseidon%') ` + - `AND ("@timestamp">=fromUnixTimestamp64Milli(1706542596491) AND "@timestamp"<=fromUnixTimestamp64Milli(1706551896491)))`, - `((("message" __quesma_match '%User logged out%' AND "host.name" __quesma_match '%poseidon%') ` + - `AND ("@timestamp">=fromUnixTimestamp64Milli(1706542596491) AND "@timestamp"<=fromUnixTimestamp64Milli(1706551896491))) ` + - `AND "stream.namespace" IS NOT NULL)`, + "((`message` __quesma_match '%User logged out%' AND `host.name` __quesma_match '%poseidon%') " + + "AND (`@timestamp`>=fromUnixTimestamp64Milli(1706542596491) AND `@timestamp`<=fromUnixTimestamp64Milli(1706551896491)))", + "(((`message` __quesma_match '%User logged out%' AND `host.name` __quesma_match '%poseidon%') " + + "AND (`@timestamp`>=fromUnixTimestamp64Milli(1706542596491) AND `@timestamp`<=fromUnixTimestamp64Milli(1706551896491))) " + + "AND `stream.namespace` IS NOT NULL)", }, model.Normal, []string{}, []string{ - `SELECT uniqMerge(uniqState("stream_namespace")) OVER () AS - "metric__unique_terms_col_0", - sum(count(*)) OVER () AS "metric____quesma_total_count_col_0", - sum(count(*)) OVER () AS "aggr__suggestions__parent_count", - "stream_namespace" AS "aggr__suggestions__key_0", - count(*) AS "aggr__suggestions__count" - FROM __quesma_table_name - WHERE (("message" ILIKE '%User logged out%' AND "host_name"='poseidon') - AND ("@timestamp">=fromUnixTimestamp64Milli(1706542596491) AND "@timestamp"<=fromUnixTimestamp64Milli(1706551896491))) - GROUP BY "stream_namespace" AS "aggr__suggestions__key_0" - ORDER BY "aggr__suggestions__count" DESC, "aggr__suggestions__key_0" ASC - LIMIT 11`, + "SELECT uniqMerge(uniqState(`stream_namespace`)) OVER () AS" + + " `metric__unique_terms_col_0`," + + " sum(count(*)) OVER () AS 
`metric____quesma_total_count_col_0`," + + " sum(count(*)) OVER () AS `aggr__suggestions__parent_count`," + + " `stream_namespace` AS `aggr__suggestions__key_0`," + + " count(*) AS `aggr__suggestions__count`" + + "FROM __quesma_table_name" + + "WHERE ((`message` ILIKE '%User logged out%' AND `host_name`='poseidon')" + + " AND (`@timestamp`>=fromUnixTimestamp64Milli(1706542596491) AND `@timestamp`<=fromUnixTimestamp64Milli(1706551896491)))" + + "GROUP BY `stream_namespace` AS `aggr__suggestions__key_0`" + + "ORDER BY `aggr__suggestions__count` DESC, `aggr__suggestions__key_0` ASC" + + "LIMIT 11", }, }, { // [23] @@ -1777,22 +1795,22 @@ var TestsSearch = []SearchTestCase{ "timeout": "1000ms" }`, []string{ - `((` + fullTextFieldName + ` iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1705915570299) AND "@timestamp"<=fromUnixTimestamp64Milli(1705916470299))) ` + - `AND "namespace" IS NOT NULL)`, - `(` + fullTextFieldName + ` iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1705915570299) AND "@timestamp"<=fromUnixTimestamp64Milli(1705916470299)))`, + "((" + fullTextFieldName + " iLIKE '%user%' AND (`@timestamp`>=fromUnixTimestamp64Milli(1705915570299) AND `@timestamp`<=fromUnixTimestamp64Milli(1705916470299))) " + + "AND `namespace` IS NOT NULL)", + "(" + fullTextFieldName + " iLIKE '%user%' AND (`@timestamp`>=fromUnixTimestamp64Milli(1705915570299) AND `@timestamp`<=fromUnixTimestamp64Milli(1705916470299)))", }, model.Normal, []string{}, []string{ - `SELECT uniqMerge(uniqState("namespace")) OVER () AS "metric__unique_terms_col_0" - , sum(count(*)) OVER () AS "aggr__suggestions__parent_count", - "namespace" AS "aggr__suggestions__key_0", - count(*) AS "aggr__suggestions__count" - FROM __quesma_table_name - WHERE ("message" iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1705915570299) AND "@timestamp"<=fromUnixTimestamp64Milli(1705916470299))) - GROUP BY "namespace" AS "aggr__suggestions__key_0" - ORDER BY "aggr__suggestions__count" 
DESC, "aggr__suggestions__key_0" ASC - LIMIT 11`, + "SELECT uniqMerge(uniqState(`namespace`)) OVER () AS `metric__unique_terms_col_0`" + + " , sum(count(*)) OVER () AS `aggr__suggestions__parent_count`," + + " `namespace` AS `aggr__suggestions__key_0`," + + " count(*) AS `aggr__suggestions__count`" + + "FROM __quesma_table_name" + + "WHERE (`message` iLIKE '%user%' AND (`@timestamp`>=fromUnixTimestamp64Milli(1705915570299) AND `@timestamp`<=fromUnixTimestamp64Milli(1705916470299)))" + + "GROUP BY `namespace` AS `aggr__suggestions__key_0`" + + "ORDER BY `aggr__suggestions__count` DESC, `aggr__suggestions__key_0` ASC" + + "LIMIT 11", }, }, { // [24] @@ -1856,25 +1874,25 @@ var TestsSearch = []SearchTestCase{ "timeout": "1000ms" }`, []string{ - `(("message" __quesma_match '%User logged out%' AND "host.name" __quesma_match '%poseidon%') ` + - `AND ("@timestamp">=fromUnixTimestamp64Milli(1706542596491) AND "@timestamp"<=fromUnixTimestamp64Milli(1706551896491))) ` + - `AND "namespace" IS NOT NULL)`, - `(("message" __quesma_match '%User logged out%' AND "host.name" __quesma_match '%poseidon%') ` + - `AND ("@timestamp">=fromUnixTimestamp64Milli(1706542596491) AND "@timestamp"<=fromUnixTimestamp64Milli(1706551896491)))`, + `((` + "`message`" + ` __quesma_match '%User logged out%' AND ` + "`host.name`" + ` __quesma_match '%poseidon%') ` + + `AND (` + "`@timestamp`" + `>=fromUnixTimestamp64Milli(1706542596491) AND ` + "`@timestamp`" + `<=fromUnixTimestamp64Milli(1706551896491))) ` + + `AND ` + "`namespace`" + ` IS NOT NULL)`, + `((` + "`message`" + ` __quesma_match '%User logged out%' AND ` + "`host.name`" + ` __quesma_match '%poseidon%') ` + + `AND (` + "`@timestamp`" + `>=fromUnixTimestamp64Milli(1706542596491) AND ` + "`@timestamp`" + `<=fromUnixTimestamp64Milli(1706551896491)))`, }, model.Normal, []string{}, []string{ - `SELECT uniqMerge(uniqState("namespace")) OVER () AS "metric__unique_terms_col_0" - , sum(count(*)) OVER () AS "aggr__suggestions__parent_count", - 
"namespace" AS "aggr__suggestions__key_0", - count(*) AS "aggr__suggestions__count" - FROM __quesma_table_name - WHERE (("message" ILIKE '%User logged out%' AND "host_name"='poseidon') - AND ("@timestamp">=fromUnixTimestamp64Milli(1706542596491) AND "@timestamp"<=fromUnixTimestamp64Milli(1706551896491))) - GROUP BY "namespace" AS "aggr__suggestions__key_0" - ORDER BY "aggr__suggestions__count" DESC, "aggr__suggestions__key_0" ASC - LIMIT 11`, + "SELECT uniqMerge(uniqState(`namespace`)) OVER () AS `metric__unique_terms_col_0`" + + " , sum(count(*)) OVER () AS `aggr__suggestions__parent_count`," + + " `namespace` AS `aggr__suggestions__key_0`," + + " count(*) AS `aggr__suggestions__count`" + + "FROM __quesma_table_name" + + "WHERE ((`message` ILIKE '%User logged out%' AND `host_name`='poseidon')" + + " AND (`@timestamp`>=fromUnixTimestamp64Milli(1706542596491) AND `@timestamp`<=fromUnixTimestamp64Milli(1706551896491)))" + + "GROUP BY `namespace` AS `aggr__suggestions__key_0`" + + "ORDER BY `aggr__suggestions__count` DESC, `aggr__suggestions__key_0` ASC" + + "LIMIT 11", }, }, { // [25] @@ -1935,21 +1953,21 @@ var TestsSearch = []SearchTestCase{ "timeout": "1000ms" }`, []string{ - `((` + fullTextFieldName + ` iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1705915570299) AND "@timestamp"<=fromUnixTimestamp64Milli(1705916470299))) AND "namespace" IS NOT NULL)`, - `(` + fullTextFieldName + ` iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1705915570299) AND "@timestamp"<=fromUnixTimestamp64Milli(1705916470299)))`, + `((` + fullTextFieldName + ` iLIKE '%user%' AND (` + "`@timestamp`" + `>=fromUnixTimestamp64Milli(1705915570299) AND ` + "`@timestamp`" + `<=fromUnixTimestamp64Milli(1705916470299))) AND ` + "`namespace`" + ` IS NOT NULL)`, + `(` + fullTextFieldName + ` iLIKE '%user%' AND (` + "`@timestamp`" + `>=fromUnixTimestamp64Milli(1705915570299) AND ` + "`@timestamp`" + `<=fromUnixTimestamp64Milli(1705916470299)))`, }, model.Normal, []string{}, 
[]string{ - `SELECT uniqMerge(uniqState("namespace")) OVER () AS "metric__unique_terms_col_0" - , sum(count(*)) OVER () AS "aggr__suggestions__parent_count", - "namespace" AS "aggr__suggestions__key_0", - count(*) AS "aggr__suggestions__count" - FROM __quesma_table_name - WHERE ("message" iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1705915570299) AND "@timestamp"<=fromUnixTimestamp64Milli(1705916470299))) - GROUP BY "namespace" AS "aggr__suggestions__key_0" - ORDER BY "aggr__suggestions__count" DESC, "aggr__suggestions__key_0" ASC - LIMIT 11`, + "SELECT uniqMerge(uniqState(`namespace`)) OVER () AS `metric__unique_terms_col_0`" + + " , sum(count(*)) OVER () AS `aggr__suggestions__parent_count`," + + " `namespace` AS `aggr__suggestions__key_0`," + + " count(*) AS `aggr__suggestions__count`" + + "FROM __quesma_table_name" + + "WHERE (`message` iLIKE '%user%' AND (`@timestamp`>=fromUnixTimestamp64Milli(1705915570299) AND `@timestamp`<=fromUnixTimestamp64Milli(1705916470299)))" + + "GROUP BY `namespace` AS `aggr__suggestions__key_0`" + + "ORDER BY `aggr__suggestions__count` DESC, `aggr__suggestions__key_0` ASC" + + "LIMIT 11", }, }, { // [26] @@ -1989,8 +2007,8 @@ var TestsSearch = []SearchTestCase{ []string{""}, model.ListByField, []string{ - `SELECT count(*) AS "column_0" FROM ` + TableName, - `SELECT "message" FROM ` + TableName + ` LIMIT 500`, + "SELECT count(*) AS `column_0` FROM `" + TableName + "`", + "SELECT `message` FROM `" + TableName + "` LIMIT 500", }, []string{}, }, @@ -2008,8 +2026,8 @@ var TestsSearch = []SearchTestCase{ []string{``}, model.ListAllFields, []string{ - `SELECT count(*) AS "column_0" FROM ` + TableName, - `SELECT "message" FROM ` + TableName + ` LIMIT 10`, + "SELECT count(*) AS `column_0` FROM `" + TableName + "`", + "SELECT `message` FROM `" + TableName + "` LIMIT 10", }, []string{}, }, @@ -2027,7 +2045,7 @@ var TestsSearch = []SearchTestCase{ []string{``}, model.ListAllFields, []string{ - `SELECT "message" FROM ` + 
TableName + ` LIMIT 10`, + "SELECT `message` FROM `" + TableName + "` LIMIT 10", }, []string{}, }, @@ -2044,7 +2062,9 @@ var TestsSearch = []SearchTestCase{ }`, []string{``}, model.ListAllFields, - []string{`SELECT "message" FROM ` + TableName}, + []string{ + "SELECT `message` FROM `" + TableName + "`", + }, []string{}, }, { // [30] @@ -2064,8 +2084,8 @@ var TestsSearch = []SearchTestCase{ []string{``}, model.ListAllFields, []string{ - `SELECT count(*) AS "column_0" FROM ` + TableName, - `SELECT "message" FROM ` + TableName, + "SELECT count(*) AS `column_0` FROM `" + TableName + "`", + "SELECT `message` FROM `" + TableName + "`", }, []string{}, }, @@ -2094,12 +2114,14 @@ var TestsSearch = []SearchTestCase{ "track_total_hits": false, "size": 12 }`, - []string{`("message" __quesma_match '%User logged out%' AND "message" __quesma_match '%User logged out%')`}, + []string{ + `(` + "`message`" + ` __quesma_match '%User logged out%' AND ` + "`message`" + ` __quesma_match '%User logged out%')`, + }, model.ListAllFields, []string{ - `SELECT "message" ` + - `FROM ` + TableName + ` ` + - `WHERE ("message" ILIKE '%User logged out%' AND "message" ILIKE '%User logged out%')`, + "SELECT `message` " + + "FROM `" + TableName + "` " + + "WHERE (`message` ILIKE '%User logged out%' AND `message` ILIKE '%User logged out%')", }, []string{}, }, @@ -2109,8 +2131,8 @@ var TestsSearch = []SearchTestCase{ []string{""}, model.ListAllFields, []string{ - `SELECT count(*) AS "column_0" FROM (SELECT 1 FROM ` + TableName + ` LIMIT 10000)`, - `SELECT "message" FROM __quesma_table_name LIMIT 10`, + "SELECT count(*) AS `column_0` FROM (SELECT 1 FROM `" + TableName + "` LIMIT 10000)", + "SELECT `message` FROM __quesma_table_name LIMIT 10", }, []string{}, }, @@ -2127,9 +2149,11 @@ var TestsSearch = []SearchTestCase{ }, "track_total_hits": false }`, - []string{`"user.id"='kimchy'`}, + []string{"`user.id`='kimchy'"}, model.ListAllFields, - []string{`SELECT "message" FROM ` + TableName + ` WHERE 
"user.id"='kimchy'`}, + []string{ + "SELECT `message` FROM `" + TableName + "` WHERE `user.id`='kimchy'", + }, []string{}, }, { // [34] this is a snowflake case as `_id` is a special field in ES and in clickhouse we compute @@ -2157,12 +2181,14 @@ var TestsSearch = []SearchTestCase{ "track_total_hits": false }`, []string{ - `("@timestamp">=fromUnixTimestamp64Milli(1705915570299) AND "@timestamp" = toDateTime64('2024-05-24 13:32:47.307',3))`, + "(`@timestamp`>=fromUnixTimestamp64Milli(1705915570299) AND `@timestamp` = toDateTime64('2024-05-24 13:32:47.307',3))", }, model.ListAllFields, // TestSearchHandler is pretty blunt with config loading so the test below can't be used. // We will probably refactor it as we move forwards with schema which will get even more side-effecting - []string{`SELECT "message" FROM ` + TableName + ` WHERE ("@timestamp">=fromUnixTimestamp64Milli(1705915570299) AND "@timestamp" = toDateTime64('2024-05-24 13:32:47.307',3)) LIMIT 10`}, + []string{ + "SELECT `message` FROM " + TableName + " WHERE (`@timestamp`>=fromUnixTimestamp64Milli(1705915570299) AND `@timestamp` = toDateTime64('2024-05-24 13:32:47.307',3)) LIMIT 10", + }, []string{}, }, { // [35] Comments in queries @@ -2177,9 +2203,9 @@ var TestsSearch = []SearchTestCase{ }, "track_total_hits": false }`, - []string{`"user.id"='kimchy'`}, + []string{"`user.id`='kimchy'"}, model.ListAllFields, - []string{`SELECT "message" FROM ` + TableName + ` WHERE "user.id"='kimchy'`}, + []string{"SELECT `message` FROM `" + TableName + "` WHERE `user.id`='kimchy'"}, []string{}, }, { // [36] terms with range @@ -2211,15 +2237,17 @@ var TestsSearch = []SearchTestCase{ }, "track_total_hits": false }`, - []string{`("cliIP" IN tuple('2601:204:c503:c240:9c41:5531:ad94:4d90', '50.116.43.98', '75.246.0.64') AND ("@timestamp">=fromUnixTimestamp64Milli(1715817600000) AND "@timestamp"<=fromUnixTimestamp64Milli(1715990399000)))`}, + []string{ + "(`cliIP` IN tuple('2601:204:c503:c240:9c41:5531:ad94:4d90', 
'50.116.43.98', '75.246.0.64') AND (`@timestamp`>=fromUnixTimestamp64Milli(1715817600000) AND `@timestamp`<=fromUnixTimestamp64Milli(1715990399000)))", + }, model.ListAllFields, //[]model.Query{withLimit(justSimplestWhere(`("cliIP" IN ('2601:204:c503:c240:9c41:5531:ad94:4d90','50.116.43.98','75.246.0.64') AND ("@timestamp">=parseDateTime64BestEffort('2024-05-16T00:00:00') AND "@timestamp"<=parseDateTime64BestEffort('2024-05-17T23:59:59')))`), 1)}, []string{ - `SELECT "message" ` + - `FROM ` + TableName + ` ` + - `WHERE ("cliIP" IN tuple('2601:204:c503:c240:9c41:5531:ad94:4d90', '50.116.43.98', '75.246.0.64') ` + - `AND ("@timestamp">=fromUnixTimestamp64Milli(1715817600000) AND "@timestamp"<=fromUnixTimestamp64Milli(1715990399000))) ` + - `LIMIT 1`, + "SELECT `message` " + + "FROM `" + TableName + "` " + + "WHERE (`cliIP` IN tuple('2601:204:c503:c240:9c41:5531:ad94:4d90', '50.116.43.98', '75.246.0.64') " + + "AND (`@timestamp`>=fromUnixTimestamp64Milli(1715817600000) AND `@timestamp`<=fromUnixTimestamp64Milli(1715990399000))) " + + "LIMIT 1", }, []string{}, }, @@ -2241,13 +2269,13 @@ var TestsSearch = []SearchTestCase{ }, "track_total_hits": false }`, - []string{`"field" LIKE '%-abb-all-li_mit%s-5'`}, + []string{"`field` LIKE '%-abb-all-li_mit%s-5'"}, model.ListAllFields, []string{ - `SELECT "message" ` + - `FROM ` + TableName + ` ` + - `WHERE "field" LIKE '%-abb-all-li_mit%s-5' ` + - `LIMIT 10`, + "SELECT `message` " + + "FROM `" + TableName + "` " + + "WHERE `field` LIKE '%-abb-all-li_mit%s-5' " + + "LIMIT 10", }, []string{}, }, @@ -2271,13 +2299,13 @@ var TestsSearch = []SearchTestCase{ }`, // Escaping _ twice ("\\_") seemed wrong, but it actually works in Clickhouse! 
// \\\\ means 2 escaped backslashes, actual returned string is "\\" - []string{`"field" LIKE '%\\___'`}, + []string{"`field` LIKE '%\\\\___'"}, model.ListAllFields, []string{ - `SELECT "message" ` + - `FROM ` + TableName + ` ` + - `WHERE "field" LIKE '%\\\\___' ` + - `LIMIT 10`, + "SELECT `message`" + + "FROM `" + TableName + "` " + + "WHERE `field` LIKE '%\\\\___' " + + "LIMIT 10", }, []string{}, }, @@ -2299,13 +2327,13 @@ var TestsSearch = []SearchTestCase{ }, "track_total_hits": false }`, - []string{`"field" REGEXP 'a*-abb-all-li.mit.*s-5'`}, + []string{"`field` REGEXP 'a*-abb-all-li.mit.*s-5'"}, model.ListAllFields, []string{ - `SELECT "message" ` + - `FROM ` + TableName + ` ` + - `WHERE "field" REGEXP 'a*-abb-all-li.mit.*s-5' ` + - `LIMIT 10`, + "SELECT `message` " + + "FROM `" + TableName + "` " + + "WHERE `field` REGEXP 'a*-abb-all-li.mit.*s-5' " + + "LIMIT 10", }, []string{}, }, @@ -2327,13 +2355,13 @@ var TestsSearch = []SearchTestCase{ }, "track_total_hits": false }`, - []string{`"field" REGEXP 'a?'`}, + []string{"`field` REGEXP 'a?'"}, model.ListAllFields, []string{ - `SELECT "message" ` + - `FROM ` + TableName + ` ` + - `WHERE "field" REGEXP 'a\?' ` + - `LIMIT 10`, + "SELECT `message` " + + "FROM `" + TableName + "` " + + "WHERE `field` REGEXP 'a\\?' 
" + + "LIMIT 10", }, []string{}, }, @@ -2354,11 +2382,11 @@ var TestsSearch = []SearchTestCase{ }, "track_total_hits": false }`, - []string{`"message" __quesma_match '% -Men\'s Clothing \\ %'`}, + []string{"`message` __quesma_match '%\nMen\\'s Clothing \\\\ \t%'"}, model.ListAllFields, - []string{`SELECT "message" FROM ` + TableName + ` WHERE "message" ILIKE '% -Men\\'s Clothing \\\\ %' LIMIT 10`}, + []string{ + "SELECT `message` FROM `" + TableName + "` WHERE `message` ILIKE '%Men\\'s Clothing \\\\ %' LIMIT 10", + }, []string{}, }, { // [42] @@ -2374,10 +2402,10 @@ Men\\'s Clothing \\\\ %' LIMIT 10`}, []string{`false`}, model.ListAllFields, []string{ - `SELECT "message" ` + - `FROM ` + TableName + ` ` + - `WHERE false ` + - `LIMIT 10`, + "SELECT `message` " + + "FROM `" + TableName + "` " + + "WHERE false " + + "LIMIT 10", }, []string{}, }, @@ -2391,13 +2419,13 @@ Men\\'s Clothing \\\\ %' LIMIT 10`}, }, "track_total_hits": false }`, - []string{`"@timestamp" = toDateTime64('2024-12-21 07:29:03.367',3)`}, + []string{"`@timestamp` = toDateTime64('2024-12-21 07:29:03.367',3)"}, model.ListAllFields, []string{ - `SELECT "message" ` + - `FROM ` + TableName + ` ` + - `WHERE "@timestamp" = toDateTime64('2024-12-21 07:29:03.367',3) ` + - `LIMIT 10`, + "SELECT `message` " + + "FROM `" + TableName + "` " + + "WHERE `@timestamp` = toDateTime64('2024-12-21 07:29:03.367',3) " + + "LIMIT 10", }, []string{}, }, @@ -2412,13 +2440,15 @@ Men\\'s Clothing \\\\ %' LIMIT 10`}, }, "track_total_hits": false }`, - []string{`"@timestamp" = toDateTime64('2025-07-06 09:38:03.12',2)`}, + []string{ + "`@timestamp` = toDateTime64('2025-07-06 09:38:03.12',2)", + }, model.ListAllFields, []string{ - `SELECT "message" ` + - `FROM ` + TableName + ` ` + - `WHERE "@timestamp" = toDateTime64('2025-07-06 09:38:03.12',2) ` + - `LIMIT 10`, + "SELECT `message` " + + "FROM `" + TableName + "` " + + "WHERE `@timestamp` = toDateTime64('2025-07-06 09:38:03.12',2) " + + "LIMIT 10", }, []string{}, }, @@ -2433,13 
+2463,13 @@ Men\\'s Clothing \\\\ %' LIMIT 10`}, }, "track_total_hits": false }`, - []string{`"@timestamp" = toDateTime64('2025-07-04 13:32:43.377',3)`}, + []string{"`@timestamp` = toDateTime64('2025-07-04 13:32:43.377',3)"}, model.ListAllFields, []string{ - `SELECT "message" ` + - `FROM ` + TableName + ` ` + - `WHERE "@timestamp" = toDateTime64('2025-07-04 13:32:43.377',3) ` + - `LIMIT 10`, + "SELECT `message` " + + "FROM `" + TableName + "` " + + "WHERE `@timestamp` = toDateTime64('2025-07-04 13:32:43.377',3) " + + "LIMIT 10", }, []string{}, }, @@ -2454,13 +2484,13 @@ Men\\'s Clothing \\\\ %' LIMIT 10`}, }, "track_total_hits": false }`, - []string{`"@timestamp" = toDateTime64('2025-07-06 10:11:03.123456789',9)`}, + []string{"`@timestamp` = toDateTime64('2025-07-06 10:11:03.123456789',9)"}, model.ListAllFields, []string{ - `SELECT "message" ` + - `FROM ` + TableName + ` ` + - `WHERE "@timestamp" = toDateTime64('2025-07-06 10:11:03.123456789',9) ` + - `LIMIT 10`, + "SELECT `message` " + + "FROM `" + TableName + "` " + + "WHERE `@timestamp` = toDateTime64('2025-07-06 10:11:03.123456789',9) " + + "LIMIT 10", }, []string{}, }, @@ -2475,13 +2505,15 @@ Men\\'s Clothing \\\\ %' LIMIT 10`}, }, "track_total_hits": false }`, - []string{`"@timestamp" = toDateTime64('2025-07-06 09:36:03.2551236',7)`}, + []string{ + "`@timestamp` = toDateTime64('2025-07-06 09:36:03.2551236',7)", + }, model.ListAllFields, []string{ - `SELECT "message" ` + - `FROM ` + TableName + ` ` + - `WHERE "@timestamp" = toDateTime64('2025-07-06 09:36:03.2551236',7) ` + - `LIMIT 10`, + "SELECT `message`" + + "FROM `" + TableName + "` " + + "WHERE `@timestamp` = toDateTime64('2025-07-06 09:36:03.2551236',7) " + + "LIMIT 10", }, []string{}, }, @@ -2496,13 +2528,15 @@ Men\\'s Clothing \\\\ %' LIMIT 10`}, }, "track_total_hits": false }`, - []string{`"@timestamp" = toDateTime64('2025-07-06 09:38:03.1',1)`}, + []string{ + "`@timestamp` = toDateTime64('2025-07-06 09:38:03.1',1)", + }, model.ListAllFields, 
[]string{ - `SELECT "message" ` + - `FROM ` + TableName + ` ` + - `WHERE "@timestamp" = toDateTime64('2025-07-06 09:38:03.1',1) ` + - `LIMIT 10`, + "SELECT `message`" + + "FROM `" + TableName + "` " + + "WHERE `@timestamp` = toDateTime64('2025-07-06 09:38:03.1',1) " + + "LIMIT 10", }, []string{}, }, @@ -2519,13 +2553,15 @@ Men\\'s Clothing \\\\ %' LIMIT 10`}, }, "track_total_hits": false }`, - []string{`"@timestamp" IN tuple(toDateTime64('2024-12-21 07:29:03.367',3), toDateTime64('2024-12-21 07:29:02.992',3))`}, + []string{ + "`@timestamp` IN tuple(toDateTime64('2024-12-21 07:29:03.367',3), toDateTime64('2024-12-21 07:29:02.992',3))", + }, model.ListAllFields, []string{ - `SELECT "message" ` + - `FROM ` + TableName + ` ` + - `WHERE "@timestamp" IN tuple(toDateTime64('2024-12-21 07:29:03.367',3), toDateTime64('2024-12-21 07:29:02.992',3)) ` + - `LIMIT 10000`, + "SELECT `message`" + + "FROM `" + TableName + "` " + + "WHERE `@timestamp` IN tuple(toDateTime64('2024-12-21 07:29:03.367',3), toDateTime64('2024-12-21 07:29:02.992',3)) " + + "LIMIT 10000", }, []string{}, }, @@ -2539,13 +2575,13 @@ Men\\'s Clothing \\\\ %' LIMIT 10`}, }, "track_total_hits": false }`, - []string{`"@timestamp" = toDateTime64('2024-12-21 07:29:03.367',3)`}, + []string{"`@timestamp` = toDateTime64('2024-12-21 07:29:03.367',3)"}, model.ListAllFields, []string{ - `SELECT "message" ` + - `FROM ` + TableName + ` ` + - `WHERE "@timestamp" = toDateTime64('2024-12-21 07:29:03.367',3) ` + - `LIMIT 10000`, + "SELECT `message`" + + "FROM `" + TableName + "` " + + "WHERE `@timestamp` = toDateTime64('2024-12-21 07:29:03.367',3) " + + "LIMIT 10000", }, []string{}, }, @@ -2559,13 +2595,13 @@ Men\\'s Clothing \\\\ %' LIMIT 10`}, }, "track_total_hits": false }`, - []string{`"@timestamp" = toDateTime64('2024-12-21 07:29:03.123456789',9)`}, + []string{"`@timestamp` = toDateTime64('2024-12-21 07:29:03.123456789',9)"}, model.ListAllFields, []string{ - `SELECT "message" ` + - `FROM ` + TableName + ` ` + - `WHERE 
"@timestamp" = toDateTime64('2024-12-21 07:29:03.123456789',9) ` + - `LIMIT 10000`, + "SELECT `message`" + + "FROM `" + TableName + "` " + + "WHERE `@timestamp` = toDateTime64('2024-12-21 07:29:03.123456789',9) " + + "LIMIT 10000", }, []string{}, }, @@ -2579,14 +2615,14 @@ Men\\'s Clothing \\\\ %' LIMIT 10`}, }, "track_total_hits": false }`, - []string{`"@timestamp" = toDateTime64('2024-12-21 07:29:03.',0)`}, + []string{"`@timestamp` = toDateTime64('2024-12-21 07:29:03.',0)"}, // dot at the end doesn't matter - CH accepts it exactly like it wasn't there model.ListAllFields, []string{ - `SELECT "message" ` + - `FROM ` + TableName + ` ` + - `WHERE "@timestamp" = toDateTime64('2024-12-21 07:29:03.',0) ` + - `LIMIT 10000`, + "SELECT `message`" + + "FROM `" + TableName + "`" + + "WHERE `@timestamp` = toDateTime64('2024-12-21 07:29:03.',0)" + + "LIMIT 10000", }, []string{}, }, @@ -2600,13 +2636,13 @@ Men\\'s Clothing \\\\ %' LIMIT 10`}, }, "track_total_hits": false }`, - []string{`"@timestamp" = toDateTime64('2024-12-21 07:29:03.3',1)`}, + []string{"`@timestamp` = toDateTime64('2024-12-21 07:29:03.3',1)"}, model.ListAllFields, []string{ - `SELECT "message" ` + - `FROM ` + TableName + ` ` + - `WHERE "@timestamp" = toDateTime64('2024-12-21 07:29:03.3',1) ` + - `LIMIT 10000`, + "SELECT `message`" + + "FROM `" + TableName + "` " + + "WHERE `@timestamp` = toDateTime64('2024-12-21 07:29:03.3',1) " + + "LIMIT 10000", }, []string{}, }, @@ -2631,13 +2667,13 @@ Men\\'s Clothing \\\\ %' LIMIT 10`}, }, "track_total_hits": false }`, - WantedSql: []string{`("tsAsUInt64">='2025-03-25T12:32:51.527Z' AND "tsAsUInt64"<='2025-03-25T12:47:51.527Z')`}, + WantedSql: []string{"(`tsAsUInt64`>='2025-03-25T12:32:51.527Z' AND `tsAsUInt64`<='2025-03-25T12:47:51.527Z')"}, WantedQueryType: model.ListAllFields, WantedRegexes: []string{ - `SELECT "message" ` + - `FROM ` + TableName + ` ` + - `WHERE ("tsAsUInt64">=1742905971527 AND "tsAsUInt64"<=1742906871527) ` + - `LIMIT 10`, + "SELECT `message`" + + 
"FROM `" + TableName + "` " + + "WHERE (`tsAsUInt64`>=1742905971527 AND `tsAsUInt64`<=1742906871527) " + + "LIMIT 10", }, }, { // [55] @@ -2660,13 +2696,13 @@ Men\\'s Clothing \\\\ %' LIMIT 10`}, }, "track_total_hits": false }`, - WantedSql: []string{`("tsAsUInt64">=15 AND "tsAsUInt64"<=2025)`}, + WantedSql: []string{"(`tsAsUInt64`>=15 AND `tsAsUInt64`<=2025)"}, WantedQueryType: model.ListAllFields, WantedRegexes: []string{ - `SELECT "message" ` + - `FROM ` + TableName + ` ` + - `WHERE ("tsAsUInt64">=15 AND "tsAsUInt64"<=1735689600000) ` + - `LIMIT 10`, + "SELECT `message`" + + "FROM `" + TableName + "` " + + "WHERE (`tsAsUInt64`>=15 AND `tsAsUInt64`<=1735689600000) " + + "LIMIT 10", }, }, { // [56] @@ -2683,7 +2719,9 @@ Men\\'s Clothing \\\\ %' LIMIT 10`}, }`, []string{"true"}, model.ListAllFields, - []string{`SELECT "message" FROM ` + TableName + " WHERE true"}, + []string{ + "SELECT `message` FROM `" + TableName + "` WHERE true", + }, []string{}, }, } @@ -2714,7 +2752,7 @@ var TestSearchRuntimeMappings = []SearchTestCase{ model.ListAllFields, ////[]model.Query{newSimplestQuery()}, []string{ - `SELECT toHour("@timestamp") AS "hour_of_day" FROM ` + TableName + ` LIMIT 10`, + "SELECT toHour(`@timestamp`) AS `hour_of_day` FROM `__quesma_table_name` LIMIT 10", }, []string{}, }, @@ -2755,10 +2793,10 @@ var TestsSearchNoAttrs = []SearchTestCase{ }, "track_total_hits": false }`, - []string{`((("@timestamp">=fromUnixTimestamp64Milli(1706188965968) AND "@timestamp"<=fromUnixTimestamp64Milli(1706189865968)) AND "summary" IS NOT NULL) AND NOT ("run_once" IS NOT NULL))`}, + []string{"(((`@timestamp`>=fromUnixTimestamp64Milli(1706188965968) AND `@timestamp`<=fromUnixTimestamp64Milli(1706189865968)) AND `summary` IS NOT NULL) AND NOT (`run_once` IS NOT NULL))"}, model.ListAllFields, []string{ - `SELECT "@timestamp", "message" FROM __quesma_table_name WHERE ((("@timestamp">=fromUnixTimestamp64Milli(1706188965968) AND "@timestamp"<=fromUnixTimestamp64Milli(1706189865968)) AND 
NULL IS NOT NULL) AND NOT (NULL IS NOT NULL)) LIMIT 10`, + "SELECT `@timestamp`, `message` FROM `__quesma_table_name` WHERE (((`@timestamp`>=fromUnixTimestamp64Milli(1706188965968) AND `@timestamp`<=fromUnixTimestamp64Milli(1706189865968)) AND NULL IS NOT NULL) AND NOT (NULL IS NOT NULL)) LIMIT 10", }, []string{}, }, @@ -2807,12 +2845,12 @@ var TestSearchFilter = []SearchTestCase{ model.Normal, []string{}, []string{ - `SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS "aggr__0__key_0" - , count(*) AS "aggr__0__count" - FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS - "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, + "SELECT toInt64(toUnixTimestamp64Milli(`@timestamp`) / 30000) AS `aggr__0__key_0`\n" + + " , count(*) AS `aggr__0__count`\n" + + "FROM `__quesma_table_name`\n" + + "GROUP BY toInt64(toUnixTimestamp64Milli(`@timestamp`) / 30000) AS\n" + + " `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__key_0` ASC", }, }, { // [1] @@ -2864,14 +2902,14 @@ var TestSearchFilter = []SearchTestCase{ model.Normal, []string{}, []string{ - `SELECT sum(count(*)) OVER () AS "metric____quesma_total_count_col_0", - toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS "aggr__0__key_0", - count(*) AS "aggr__0__count" - FROM __quesma_table_name - WHERE "@timestamp">subDate(now(), INTERVAL 15 minute) - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS - "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, + "SELECT sum(count(*)) OVER () AS `metric____quesma_total_count_col_0`,\n" + + " toInt64(toUnixTimestamp64Milli(`@timestamp`) / 30000) AS `aggr__0__key_0`,\n" + + " count(*) AS `aggr__0__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE `@timestamp`>subDate(now(), INTERVAL 15 minute)\n" + + "GROUP BY toInt64(toUnixTimestamp64Milli(`@timestamp`) / 30000) AS\n" + + " `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__key_0` ASC", }, }, { // [2] @@ -2925,14 +2963,14 @@ var TestSearchFilter = []SearchTestCase{ 
model.Normal, []string{}, []string{ - `SELECT sum(count(*)) OVER () AS "metric____quesma_total_count_col_0", - toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS "aggr__0__key_0", - count(*) AS "aggr__0__count" - FROM __quesma_table_name - WHERE ("@timestamp">=fromUnixTimestamp64Milli(1727858503270) AND "@timestamp"<=fromUnixTimestamp64Milli(1727859403270)) - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS - "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, + "SELECT sum(count(*)) OVER () AS `metric____quesma_total_count_col_0`,\n" + + " toInt64(toUnixTimestamp64Milli(`@timestamp`) / 30000) AS `aggr__0__key_0`,\n" + + " count(*) AS `aggr__0__count`\n" + + "FROM `__quesma_table_name`\n" + + "WHERE (`@timestamp`>=fromUnixTimestamp64Milli(1727858503270) AND `@timestamp`<=fromUnixTimestamp64Milli(1727859403270))\n" + + "GROUP BY toInt64(toUnixTimestamp64Milli(`@timestamp`) / 30000) AS\n" + + " `aggr__0__key_0`\n" + + "ORDER BY `aggr__0__key_0` ASC", }, }, { // [3] @@ -2949,7 +2987,9 @@ var TestSearchFilter = []SearchTestCase{ []string{}, model.Normal, []string{}, - []string{`SELECT "@timestamp", "message" FROM __quesma_table_name LIMIT 10`}, + []string{ + "SELECT `@timestamp`, `message` FROM `__quesma_table_name` LIMIT 10", + }, }, { // [4] "Empty filter with other clauses", @@ -2977,16 +3017,16 @@ var TestSearchFilter = []SearchTestCase{ "track_total_hits": false }`, []string{ - `("user.id"='kimchy' AND ("tags"='env1' OR "tags"='deployed')) AND NOT ("age"<=20 AND "age">=10)`, - `("user.id"='kimchy' AND ("tags"='env1' OR "tags"='deployed')) AND NOT ("age">=10 AND "age"<=20)`, + "(`user.id`='kimchy' AND (`tags`='env1' OR `tags`='deployed')) AND NOT (`age`<=20 AND `age`>=10)", + "(`user.id`='kimchy' AND (`tags`='env1' OR `tags`='deployed')) AND NOT (`age`>=10 AND `age`<=20)", }, model.Normal, []string{ - `SELECT "@timestamp", "message" ` + - `FROM ` + TableName + ` ` + - `WHERE (("attributes_values"['user.id']='kimchy' AND 
("attributes_values"['tags']='env1' OR "attributes_values"['tags']='deployed')) ` + - `AND NOT (("attributes_values"['age']>=10 AND "attributes_values"['age']<=20))) ` + - `LIMIT 10`, + "SELECT `@timestamp`, `message` " + + "FROM `__quesma_table_name` " + + "WHERE ((`attributes_values`['user.id']='kimchy' AND (`attributes_values`['tags']='env1' OR `attributes_values`['tags']='deployed')) " + + "AND NOT ((`attributes_values`['age']>=10 AND `attributes_values`['age']<=20))) " + + "LIMIT 10", }, []string{}, }, diff --git a/platform/testdata/requests_no_full_text_fields.go b/platform/testdata/requests_no_full_text_fields.go index 439be3648..4b544f5de 100644 --- a/platform/testdata/requests_no_full_text_fields.go +++ b/platform/testdata/requests_no_full_text_fields.go @@ -102,7 +102,7 @@ var TestsSearchNoFullTextFields = []SearchTestCase{ } }`, WantedSql: []string{ - `((((("__quesma_fulltext_field_name" iLIKE '%quick%' AND "__quesma_fulltext_field_name" iLIKE '%fox%') OR ("__quesma_fulltext_field_name" iLIKE '%brown%' AND "__quesma_fulltext_field_name" iLIKE '%fox%')) OR "__quesma_fulltext_field_name" iLIKE '%fox%') AND NOT ("__quesma_fulltext_field_name" iLIKE '%news%')) AND ("timestamp">='2024-03-26T09:56:02.241Z' AND "timestamp"<='2024-04-10T08:56:02.241Z'))`, + "(((((`__quesma_fulltext_field_name` iLIKE '%quick%' AND `__quesma_fulltext_field_name` iLIKE '%fox%') OR (`__quesma_fulltext_field_name` iLIKE '%brown%' AND `__quesma_fulltext_field_name` iLIKE '%fox%')) OR `__quesma_fulltext_field_name` iLIKE '%fox%') AND NOT (`__quesma_fulltext_field_name` iLIKE '%news%')) AND (`timestamp`>='2024-03-26T09:56:02.241Z' AND `timestamp`<='2024-04-10T08:56:02.241Z'))", }, WantedQueryType: model.ListAllFields, //WantedQuery: []model.Query{ diff --git a/platform/testdata/requests_with_special_characters.go b/platform/testdata/requests_with_special_characters.go index e5eebeb10..2e7fcd25a 100644 --- a/platform/testdata/requests_with_special_characters.go +++ 
b/platform/testdata/requests_with_special_characters.go @@ -144,32 +144,6 @@ var AggregationTestsWithSpecialCharactersInFieldNames = []AggregationTestCase{ } }`, ExpectedPancakeResults: []model.QueryResultRow{}, // checking only the SQLs is enough for now - ExpectedPancakeSQL: `WITH quesma_top_hits_group_table AS ( - SELECT sum(count(*)) OVER () AS "metric____quesma_total_count_col_0", - toInt64(toUnixTimestamp64Milli("__timestamp") / 43200000) AS - "aggr__0__key_0", count(*) AS "aggr__0__count" - FROM __quesma_table_name - WHERE "message_____" IS NOT NULL - GROUP BY toInt64(toUnixTimestamp64Milli("__timestamp") / 43200000) AS - "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC) , - quesma_top_hits_join AS ( - SELECT "group_table"."metric____quesma_total_count_col_0" AS - "metric____quesma_total_count_col_0", - "group_table"."aggr__0__key_0" AS "aggr__0__key_0", - "group_table"."aggr__0__count" AS "aggr__0__count", - "hit_table"."__bytes" AS "top_metrics__0__1_col_0", - "hit_table"."__timestamp" AS "top_metrics__0__1_col_1", - ROW_NUMBER() OVER (PARTITION BY "group_table"."aggr__0__key_0" ORDER BY - "__timestamp" DESC) AS "top_hits_rank" - FROM quesma_top_hits_group_table AS "group_table" LEFT OUTER JOIN - __quesma_table_name AS "hit_table" ON ("group_table"."aggr__0__key_0"= - toInt64(toUnixTimestamp64Milli("__timestamp") / 43200000)) - WHERE "message_____" IS NOT NULL) - SELECT "metric____quesma_total_count_col_0", "aggr__0__key_0", "aggr__0__count", - "top_metrics__0__1_col_0", "top_metrics__0__1_col_1", "top_hits_rank" - FROM "quesma_top_hits_join" - WHERE "top_hits_rank"<=1 - ORDER BY "aggr__0__key_0" ASC, "top_hits_rank" ASC`, + ExpectedPancakeSQL: "WITH quesma_top_hits_group_table AS (\n SELECT sum(count(*)) OVER () AS `metric____quesma_total_count_col_0`,\n toInt64(toUnixTimestamp64Milli(`__timestamp`) / 43200000) AS `aggr__0__key_0\n `, count(*) AS `aggr__0__count`\n FROM `__quesma_table_name`\n WHERE `message_____` IS NOT NULL\n GROUP BY 
toInt64(toUnixTimestamp64Milli(`__timestamp`) / 43200000) AS `\n aggr__0__key_0`\n ORDER BY `aggr__0__key_0` ASC) ,\nquesma_top_hits_join AS (\n SELECT `group_table`.`metric____quesma_total_count_col_0` AS `\n metric____quesma_total_count_col_0`,\n `group_table`.`aggr__0__key_0` AS `aggr__0__key_0`,\n `group_table`.`aggr__0__count` AS `aggr__0__count`,\n `hit_table`.`__bytes` AS `top_metrics__0__1_col_0`,\n `hit_table`.`__timestamp` AS `top_metrics__0__1_col_1`,\n ROW_NUMBER() OVER (PARTITION BY `group_table`.`aggr__0__key_0` ORDER BY `\n __timestamp` DESC) AS `top_hits_rank`\n FROM quesma_top_hits_group_table AS `group_table` LEFT OUTER JOIN `\n __quesma_table_name` AS `hit_table` ON (`group_table`.`aggr__0__key_0`=\n toInt64(toUnixTimestamp64Milli(`__timestamp`) / 43200000))\n WHERE `message_____` IS NOT NULL)\nSELECT `metric____quesma_total_count_col_0`, `aggr__0__key_0`, `aggr__0__count`,\n `top_metrics__0__1_col_0`, `top_metrics__0__1_col_1`, `top_hits_rank`\nFROM `quesma_top_hits_join`\nWHERE `top_hits_rank`<=1\nORDER BY `aggr__0__key_0` ASC, `top_hits_rank` ASC", }, } diff --git a/platform/util/utils.go b/platform/util/utils.go index 464a6feb4..76dc5569c 100644 --- a/platform/util/utils.go +++ b/platform/util/utils.go @@ -759,6 +759,14 @@ func SingleQuote(value string) string { return "'" + value + "'" } +// BackquoteIdentifier is a simple helper function: str -> `str` +func BackquoteIdentifier(identifier string) string { + if len(identifier) >= 2 && identifier[0] == '`' && identifier[len(identifier)-1] == '`' { + return identifier + } + return "`" + identifier + "`" +} + // IsSingleQuoted checks if a string is single-quoted func IsSingleQuoted(s string) bool { return len(s) >= 2 && s[0] == '\'' && s[len(s)-1] == '\''